import tensorflow as tf
from keras import backend as K, Model, Input
from spektral.layers import GraphConv, ChebConv, EdgeConditionedConv, GraphAttention, GraphConvSkip, ARMAConv, APPNP, \
    GraphSageConv, GINConv
SINGLE, BATCH, MIXED = 1, 2, 3  # Data modes: one graph; a batch of graphs; one adjacency shared by a batch of node features
LAYER_K_, MODES_K_, KWARGS_K_ = 'layer', 'modes', 'kwargs'
TESTS = [
    {
        LAYER_K_: GraphConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: ChebConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: GraphSageConv,
        MODES_K_: [SINGLE],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: EdgeConditionedConv,
        MODES_K_: [SINGLE, BATCH],
        KWARGS_K_: {'channels': 8, 'activation': 'relu', 'edges': True}
    },
    {
        LAYER_K_: GraphAttention,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    }
]
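
# A minimal sketch (an assumption, not part of the original test file) of how a
# harness might consume TESTS: instantiate each layer with its kwargs and report
# the data modes it should support. 'edges' is treated here as a harness flag
# marking layers that need edge features, not as a layer argument.
for test in TESTS:
    kwargs = dict(test[KWARGS_K_])
    uses_edges = kwargs.pop('edges', False)  # Harness flag, not a layer kwarg
    layer = test[LAYER_K_](**kwargs)
    print('{}: modes={}, needs edge features={}'.format(
        type(layer).__name__, test[MODES_K_], uses_edges))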
from keras.callbacks import EarlyStopping
from keras.layers import Input, Dropout
from keras.models import Model
from keras.optimizers import Adam
from keras.regularizers import l2
from spektral.datasets import citation
from spektral.layers import ChebConv
from spektral.utils.convolution import chebyshev_filter

# Load data (the Cora citation network)
A, X, y, train_mask, val_mask, test_mask = citation.load_data('cora')
N = X.shape[0]          # Number of nodes
F = X.shape[1]          # Number of node features
n_classes = y.shape[1]  # Number of classes

# Hyperparameters (channels and cheb_k are typical values assumed here; the
# rest come from the original snippet)
channels = 16           # Channels in the first ChebConv layer
cheb_k = 2              # Max degree of the Chebyshev approximation
support = cheb_k + 1    # One filter per Chebyshev polynomial T_0 ... T_k
dropout = 0.5           # Dropout rate applied to the features
l2_reg = 5e-4           # Regularization rate for l2
learning_rate = 1e-2    # Learning rate for Adam
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping

# Preprocessing operations
fltr = chebyshev_filter(A, cheb_k)

# Model definition
X_in = Input(shape=(F, ))
# One input filter for each degree of the Chebyshev approximation
fltr_in = [Input((N, ), sparse=True) for _ in range(support)]

dropout_1 = Dropout(dropout)(X_in)
graph_conv_1 = ChebConv(channels,
                        activation='relu',
                        kernel_regularizer=l2(l2_reg),
                        use_bias=False)([dropout_1] + fltr_in)
dropout_2 = Dropout(dropout)(graph_conv_1)
graph_conv_2 = ChebConv(n_classes,
                        activation='softmax',
                        use_bias=False)([dropout_2] + fltr_in)

# Build model
model = Model(inputs=[X_in] + fltr_in, outputs=graph_conv_2)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])
model.summary()

# Train model
validation_data = ([X] + fltr, y, val_mask)
model.fit([X] + fltr,
          y,
          sample_weight=train_mask,
          epochs=epochs,
          batch_size=N,
          validation_data=validation_data,
          shuffle=False,  # Shuffling data means shuffling the whole graph
          callbacks=[EarlyStopping(patience=es_patience,
                                   restore_best_weights=True)])
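
# Evaluation follows the same masked, full-batch pattern; a short sketch using
# the test_mask returned by the data loader above:
eval_results = model.evaluate([X] + fltr,
                              y,
                              sample_weight=test_mask,
                              batch_size=N)
print('Test loss: {:.4f} | Test accuracy: {:.4f}'.format(*eval_results))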
from spektral import layers

EXCLUDE = {}
# For each class to document, it is possible to:
# 1) Document only the class: [classA, classB, ...]
# 2) Document all its methods: [classA, (classB, "*")]
# 3) Choose which methods to document (methods listed as strings):
# [classA, (classB, ["method1", "method2", ...]), ...]
# 4) Choose which methods to document (methods listed as qualified names):
# [classA, (classB, [module.classB.method1, module.classB.method2, ...]), ...]
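# For instance, a hypothetical entry (not part of this config) combining the
# formats above could look like:
#
#   {'page': 'layers/example.md',
#    'classes': [layers.GraphConv,                             # class only
#                (layers.ChebConv, "*"),                       # all methods
#                (layers.GraphAttention, ["build", "call"])]}  # named methods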
PAGES = [
    {
        'page': 'layers/convolution.md',
        'classes': [
            layers.GraphConv,
            layers.ChebConv,
            layers.GraphSageConv,
            layers.ARMAConv,
            layers.EdgeConditionedConv,
            layers.GraphAttention,
            layers.GraphConvSkip,
            layers.APPNP,
            layers.GINConv
        ]
    },
    {
        'page': 'layers/base.md',
        'functions': [],
        'methods': [],
        'classes': [
            layers.InnerProduct,
            layers.MinkowskiProduct
        ]
    }
]