How to use the spektral.layers.ARMAConv layer in spektral

To help you get started, we’ve selected a few spektral examples showing popular ways ARMAConv is used in public projects.

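Before the project snippets, here is a minimal, self-contained sketch of a two-layer ARMA model for node classification. It is an illustration rather than code from the projects below: it assumes a spektral 0.x release with the order/iterations/share_weights signature used in the test snippet further down, plus tensorflow.keras, and it builds a random graph as a stand-in for real data.

import scipy.sparse as sp
from tensorflow.keras.layers import Input, Dropout
from tensorflow.keras.models import Model
from spektral.layers import ARMAConv
from spektral.utils import normalized_laplacian, rescale_laplacian

N, F, n_classes = 100, 16, 7

# Random symmetric adjacency matrix as a stand-in for a real graph
A = sp.random(N, N, density=0.05, format='csr')
A = A + A.T

# Same preprocessing as the ARMA example below: rescaled normalized Laplacian
fltr = rescale_laplacian(normalized_laplacian(A, symmetric=True), lmax=2)

X_in = Input(shape=(F,))
fltr_in = Input(shape=(N,), sparse=True)
h = ARMAConv(16, order=2, iterations=2, share_weights=True,
             activation='elu')([X_in, fltr_in])
h = Dropout(0.5)(h)
out = ARMAConv(n_classes, order=1, iterations=1,
               activation='softmax')([h, fltr_in])

model = Model(inputs=[X_in, fltr_in], outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()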

From danielegrattarola/spektral: tests/benchmarks/citation/citation.py (view on GitHub)
{
        'layer': GraphConv,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': GraphConvSkip,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': ARMAConv,
        'n_layers': 1,
        'kwargs': {
            'T': neighbourhood,
            'K': 1,
            'recurrent': True
        },
        'fltr': lambda A: rescale_laplacian(normalized_laplacian(A), lmax=2),
        'sparse': True
    },
    {
        'layer': GraphAttention,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: A,
        'sparse': False
    },
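
Note that in this benchmark config, ARMAConv is the one entry with n_layers set to 1 but T set to neighbourhood: a single ARMA layer runs T propagation steps internally, so it covers the same receptive field that the other models reach by stacking neighbourhood separate layers. The driver that consumes these entries is not shown here; as a hedged sketch of how one entry might be applied (the function name and channel count are hypothetical):

def build_from_config(config, A, channels=16):
    # Preprocess the adjacency matrix into the filter this layer expects
    fltr = config['fltr'](A)
    # Instantiate the layer class with its benchmark-specific kwargs
    layer = config['layer'](channels, **config['kwargs'])
    return layer, fltr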
From danielegrattarola/spektral: tests/test_layers/test_convolutional.py (view on GitHub)
LAYER_K_: EdgeConditionedConv,
        MODES_K_: [SINGLE, BATCH],
        KWARGS_K_: {'channels': 8, 'activation': 'relu', 'edges': True}
    },
    {
        LAYER_K_: GraphAttention,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'attn_heads': 2, 'concat_heads': False, 'activation': 'relu'}
    },
    {
        LAYER_K_: GraphConvSkip,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: ARMAConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu', 'order': 2, 'iterations': 2, 'share_weights': True}
    },
    {
        LAYER_K_: APPNP,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu', 'mlp_hidden': [16]}
    },
    {
        LAYER_K_: GINConv,
        MODES_K_: [SINGLE],
        KWARGS_K_: {'channels': 8, 'activation': 'relu', 'n_hidden_layers': 1}
    }
]

sess = K.get_session()
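
The MODES_K_ lists above declare which data modes each layer, ARMAConv included, is tested in. As a summary of the spektral 0.x shape conventions behind those names (the sizes here are illustrative):

import numpy as np

batch, N, F = 4, 100, 16
# SINGLE: one graph, with node features X and a single adjacency matrix A
X_single, A_single = np.zeros((N, F)), np.zeros((N, N))
# BATCH: a dense batch of graphs, all with the same number of nodes
X_batch, A_batch = np.zeros((batch, N, F)), np.zeros((batch, N, N))
# MIXED: a batch of feature matrices that all share one adjacency matrix
X_mixed, A_mixed = np.zeros((batch, N, F)), np.zeros((N, N))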
From danielegrattarola/spektral: tests/benchmarks/node_classification/node_classification.py (view on GitHub)
{
        'layer': GraphConv,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': GraphConvSkip,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': ARMAConv,
        'n_layers': 1,
        'kwargs': {
            'T': neighbourhood,
            'K': 1,
            'recurrent': True,
            'dropout_rate': dropout_rate
        },
        'fltr': lambda A: rescale_laplacian(normalized_laplacian(A), lmax=2),
        'sparse': True
    },
    {
        'layer': GraphAttention,
        'n_layers': neighbourhood,
        'kwargs': {
            'dropout_rate': dropout_rate
        },
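
Comparing these benchmark snippets with the test file above suggests that ARMAConv's keyword arguments were renamed between spektral versions: T (the number of recursive propagation steps) appears to correspond to iterations, K (the number of parallel ARMA stacks) to order, and recurrent (reusing the same weights at every step) to share_weights. This mapping is inferred from the snippets on this page, not from an official changelog:

from spektral.layers import ARMAConv

# Only one signature is valid for any given spektral release:
# conv = ARMAConv(16, T=2, K=2, recurrent=True)                  # older releases
conv = ARMAConv(16, iterations=2, order=2, share_weights=True)   # newer releases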
From danielegrattarola/spektral: examples/node_classification_arma.py (view on GitHub)
dropout_rate = 0.75     # Dropout rate applied to the input of ARMA layers
l2_reg = 5e-4           # L2 regularization rate
learning_rate = 1e-2    # Learning rate for Adam
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping

# Preprocessing operations
fltr = normalized_laplacian(A, symmetric=True)
fltr = rescale_laplacian(fltr, lmax=2)
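# With lmax=2 this maps the normalized Laplacian's [0, 2] eigenvalue range
# onto [-1, 1] (an assumed reading of rescale_laplacian, not stated here)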

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout_rate)(X_in)
graph_conv_1 = ARMAConv(16,
                        T=ARMA_T,
                        K=ARMA_K,
                        recurrent=recurrent,
                        dropout_rate=dropout_rate,
                        activation='elu',
                        gcn_activation='elu',
                        kernel_regularizer=l2(l2_reg))([dropout_1, fltr_in])
dropout_2 = Dropout(dropout_rate)(graph_conv_1)
graph_conv_2 = ARMAConv(n_classes,
                        T=1,
                        K=1,
                        recurrent=recurrent,
                        dropout_rate=dropout_rate,
                        activation='softmax',
                        gcn_activation=None,
                        kernel_regularizer=l2(l2_reg))([dropout_2, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()
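
The snippet stops at model.summary(). As a hedged sketch of the full-batch training step that citation examples like this one typically run next (X, y, train_mask, and val_mask are assumed to be loaded earlier in the file; they are not shown on this page):

from tensorflow.keras.callbacks import EarlyStopping

model.fit([X, fltr], y,
          sample_weight=train_mask,   # mask selecting the training nodes
          epochs=epochs,
          batch_size=N,               # full batch: the whole graph in one step
          validation_data=([X, fltr], y, val_mask),
          shuffle=False,              # shuffling would scramble the node order
          callbacks=[EarlyStopping(patience=es_patience,
                                   restore_best_weights=True)])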
From danielegrattarola/spektral: docs/autogen.py (view on GitHub)
# For each class to document, it is possible to:
# 1) Document only the class: [classA, classB, ...]
# 2) Document all its methods: [classA, (classB, "*")]
# 3) Choose which methods to document (methods listed as strings):
# [classA, (classB, ["method1", "method2", ...]), ...]
# 4) Choose which methods to document (methods listed as qualified names):
# [classA, (classB, [module.classB.method1, module.classB.method2, ...]), ...]

PAGES = [
    {
        'page': 'layers/convolution.md',
        'classes': [
            layers.GraphConv,
            layers.ChebConv,
            layers.GraphSageConv,
            layers.ARMAConv,
            layers.EdgeConditionedConv,
            layers.GraphAttention,
            layers.GraphConvSkip,
            layers.APPNP,
            layers.GINConv
        ]
    },
    {
        'page': 'layers/base.md',
        'functions': [],
        'methods': [],
        'classes': [
            layers.InnerProduct,
            layers.MinkowskiProduct
        ]
    },
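
For reference, a hypothetical PAGES entry combining the documentation options described in the comments above (the page name and method list are made up):

PAGES_EXAMPLE = [
    {
        'page': 'layers/example.md',
        'classes': [
            layers.GraphConv,             # 1) document only the class
            (layers.ARMAConv, "*"),       # 2) document the class and all methods
            (layers.ChebConv, ["call"]),  # 3) document selected methods by name
        ]
    },
]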