How to use the spektral.layers.GraphConv layer in spektral

To help you get started, we've selected a few spektral examples based on popular ways the library is used in public projects.

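Before diving into the project snippets below, here is a minimal, self-contained sketch of the pattern they all share: a GraphConv layer is called on a list [node_features, filter], where the filter is a preprocessed adjacency matrix. The sketch assumes the spektral 0.x API used throughout these examples (in later releases GraphConv became GCNConv and localpooling_filter became gcn_filter) and uses toy placeholder data rather than a real dataset.

import numpy as np
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from spektral.layers import GraphConv
from spektral.utils import localpooling_filter

# Toy problem sizes (placeholders)
N, F, n_classes = 100, 16, 4
A = np.eye(N)                  # adjacency matrix; a real graph would have edges
X = np.random.rand(N, F)       # node features
y = np.eye(n_classes)[np.random.randint(0, n_classes, N)]  # one-hot labels

fltr = localpooling_filter(A)  # normalized filter expected by GraphConv

# GraphConv consumes [features, filter]; in single mode each node is a "sample"
X_in = Input(shape=(F, ))
fltr_in = Input((N, ))
hidden = GraphConv(32, activation='relu')([X_in, fltr_in])
output = GraphConv(n_classes, activation='softmax')([hidden, fltr_in])

model = Model(inputs=[X_in, fltr_in], outputs=output)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit([X, fltr], y, batch_size=N, epochs=1, shuffle=False)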

Example from danielegrattarola/spektral: tests/benchmarks/citation/citation.py
        N = X.shape[0]          # Number of nodes in the graph
        F = X.shape[1]          # Original feature dimensionality
        n_classes = y.shape[1]  # Number of classes

        # Preprocessing operations
        fltr = c['fltr'](A)

        # Model definition
        X_in = Input(shape=(F, ))
        fltr_in = Input((N, ), sparse=c['sparse'])

        gc_1 = Dropout(dropout_rate)(X_in)
        for _ in range(c['n_layers']):
            gc_1 = c['layer'](**dict(base_kwargs, **c['kwargs']))([gc_1, fltr_in])
        gc_2 = Dropout(dropout_rate)(gc_1)
        gc_2 = GraphConv(n_classes, activation='softmax')([gc_2, fltr_in])

        # Build model
        model = Model(inputs=[X_in, fltr_in], outputs=gc_2)
        optimizer = Adam(lr=learning_rate)
        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      weighted_metrics=['acc'])
        if i == 0:
            weights.append((
                c['layer'].__name__, sum([i.size for i in model.get_weights()])
            ))

        # Callbacks
        callbacks = [
            EarlyStopping(monitor='val_weighted_acc',
                          patience=es_patience)
        ]
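One idiom worth noting in this snippet: dict(base_kwargs, **c['kwargs']) builds each layer's constructor arguments by copying the shared defaults and letting the per-configuration kwargs win on any clash. A standalone illustration with placeholder values:

base_kwargs = {'channels': 16, 'activation': 'relu'}
overrides = {'channels': 64}              # stands in for c['kwargs']
merged = dict(base_kwargs, **overrides)   # keyword arguments override the defaults
assert merged == {'channels': 64, 'activation': 'relu'}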

Example from danielegrattarola/spektral: tests/benchmarks/citation/citation.py
dropout_rate = 0.5      # Dropout rate applied to the input of GCN layers
l2_reg = 5e-4           # L2 regularization rate
learning_rate = 1e-3    # Learning rate for Adam
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping
runs = 100              # Number of independent runs

base_kwargs = {
    'channels': 16,
    'activation': 'relu',
    'kernel_regularizer': l2(l2_reg),
}

CONFIG = [
    {
        'layer': GraphConv,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': GraphConvSkip,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': ARMAConv,
        'n_layers': 1,
        'kwargs': {
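Every configuration above preprocesses the adjacency matrix with localpooling_filter, which applies the standard GCN normalization: add self-loops, then symmetrically normalize by the node degrees. The dense NumPy sketch below illustrates the computation; treat it as an explanation of the idea, not as spektral's exact (sparse-aware) implementation:

import numpy as np

def gcn_filter(A):
    # A_hat = D^(-1/2) (A + I) D^(-1/2), with D the degree matrix of A + I
    A_tilde = A + np.eye(A.shape[0])          # add self-loops
    d_inv_sqrt = np.power(A_tilde.sum(axis=1), -0.5)
    D_inv_sqrt = np.diag(d_inv_sqrt)
    return D_inv_sqrt @ A_tilde @ D_inv_sqrt  # symmetric normalization

# Example: path graph on three nodes
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
fltr = gcn_filter(A)  # plays the role of localpooling_filter(A) above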

Example from danielegrattarola/spektral: tests/benchmarks/node_classification/node_classification.py
dropout_rate = 0.5      # Dropout rate applied to the input of GCN layers
l2_reg = 5e-4           # L2 regularization rate
learning_rate = 5e-3    # Learning rate for Adam
epochs = 20000          # Number of training epochs
es_patience = 50        # Patience for early stopping
runs = 10               # Number of independent runs

base_kwargs = {
    'channels': 32,
    'activation': 'relu',
    'kernel_regularizer': l2(l2_reg),
}

CONFIG = [
    {
        'layer': GraphConv,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': GraphConvSkip,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': ARMAConv,
        'n_layers': 1,
        'kwargs': {

Example from danielegrattarola/spektral: examples/node_classification_simple_gc.py
l2_reg = 5e-6           # L2 regularization rate
learning_rate = 0.2     # Learning rate for Adam
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping

# Preprocessing operations
fltr = localpooling_filter(A)

# Pre-compute propagation
for i in range(K - 1):
    fltr = fltr.dot(fltr)
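# Note: each pass through the loop squares the filter, so this computes
# fltr^(2^(K-1)) rather than fltr^K; the two coincide for the typical K = 2.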

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
output = GraphConv(n_classes,
                   activation='softmax',
                   kernel_regularizer=l2(l2_reg),
                   use_bias=False)([X_in, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

# Train model
validation_data = ([X, fltr], y, val_mask)
model.fit([X, fltr],
          y,

Example from danielegrattarola/spektral: examples/node_classification_gcn.py
n_classes = y.shape[1]  # Number of classes
dropout = 0.5           # Dropout rate applied to the features
l2_reg = 5e-4           # L2 regularization rate
learning_rate = 1e-2    # Learning rate for Adam
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping

# Preprocessing operations
fltr = localpooling_filter(A)

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = GraphConv(channels,
                         activation='relu',
                         kernel_regularizer=l2(l2_reg),
                         use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = GraphConv(n_classes,
                         activation='softmax',
                         use_bias=False)([dropout_2, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

# Train model
validation_data = ([X, fltr], y, val_mask)
model.fit([X, fltr],
          y,
          sample_weight=train_mask,
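The fit call above is cut off by the excerpt. In these transductive examples the whole graph is fed as a single batch and the boolean masks decide which nodes count towards the loss and metrics. A plausible completion under those assumptions (batch_size=N, shuffle=False, and restore_best_weights are educated guesses, not verbatim from the file):

model.fit([X, fltr],
          y,
          sample_weight=train_mask,   # only training nodes contribute to the loss
          epochs=epochs,
          batch_size=N,               # the whole graph is a single batch
          validation_data=validation_data,
          shuffle=False,              # keep node order aligned with fltr
          callbacks=[EarlyStopping(monitor='val_weighted_acc',
                                   patience=es_patience,
                                   restore_best_weights=True)])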

Example from danielegrattarola/spektral: examples/graph_signal_classification_mnist.py
# Load data
X_train, y_train, X_val, y_val, X_test, y_test, adj = mnist.load_data()
X_train, X_val, X_test = X_train[..., None], X_val[..., None], X_test[..., None]
N = X_train.shape[-2]      # Number of nodes in the graphs
F = X_train.shape[-1]      # Node features dimensionality
n_out = y_train.shape[-1]  # Dimension of the target

fltr = normalized_laplacian(adj)

# Model definition
X_in = Input(shape=(N, F))
# Pass A as a fixed tensor, otherwise Keras will complain about inputs of
# different rank.
A_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([X_in, A_in])
graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([graph_conv, A_in])
flatten = Flatten()(graph_conv)
fc = Dense(512, activation='relu')(flatten)
output = Dense(n_out, activation='softmax')(fc)

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
model.summary()
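Unlike the node-classification examples, this model runs in batch mode: every MNIST digit is a separate graph signal on the same fixed grid graph, so the Laplacian is passed as a constant tensor and only the node features are fed during training. A plausible training call under that assumption (batch size and epoch count are placeholders):

# A is a fixed input tensor, so fit only receives the features and labels
model.fit(X_train,
          y_train,
          batch_size=32,
          epochs=10,
          validation_data=(X_val, y_val))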

Example from danielegrattarola/spektral: docs/autogen.py
EXCLUDE = {}

# For each class to document, it is possible to:
# 1) Document only the class: [classA, classB, ...]
# 2) Document all its methods: [classA, (classB, "*")]
# 3) Choose which methods to document (methods listed as strings):
# [classA, (classB, ["method1", "method2", ...]), ...]
# 4) Choose which methods to document (methods listed as qualified names):
# [classA, (classB, [module.classB.method1, module.classB.method2, ...]), ...]

PAGES = [
    {
        'page': 'layers/convolution.md',
        'classes': [
            layers.GraphConv,
            layers.ChebConv,
            layers.GraphSageConv,
            layers.ARMAConv,
            layers.EdgeConditionedConv,
            layers.GraphAttention,
            layers.GraphConvSkip,
            layers.APPNP,
            layers.GINConv
        ]
    },
    {
        'page': 'layers/base.md',
        'functions': [],
        'methods': [],
        'classes': [
            layers.InnerProduct,