# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
# NOTE(review): this fragment appears out of order — it uses names (average_N,
# mincut_H, activ, pool_l2, gc1, A_in, I_in, n_channels, GNN_l2, X_in, target,
# n_out, K, categorical_accuracy) that are defined later in this file or not
# visible here. Confirm the intended statement order against the original script.
# First pooling stage: coarsen to roughly half the average node count.
# The four outputs are unpacked and reused below as pooled features (X_1),
# pooled adjacency (A_1) and segment ids (I_1); M_1 is unused in this fragment.
X_1, A_1, I_1, M_1 = MinCutPool(k=int(average_N // 2),
h=mincut_H,
activation=activ,
kernel_regularizer=l2(pool_l2))([gc1, A_in, I_in])
# Block 2
gc2 = GraphConvSkip(n_channels,
activation=activ,
kernel_regularizer=l2(GNN_l2))([X_1, A_1])
# Second pooling stage: coarsen further to a quarter of the average node count.
X_2, A_2, I_2, M_2 = MinCutPool(k=int(average_N // 4),
h=mincut_H,
activation=activ,
kernel_regularizer=l2(pool_l2))([gc2, A_1, I_1])
# Block 3
X_3 = GraphConvSkip(n_channels,
activation=activ,
kernel_regularizer=l2(GNN_l2))([X_2, A_2])
# Output block
# Average node features per graph (grouped by segment ids I_2), then classify.
avgpool = GlobalAvgPool()([X_3, I_2])
output = Dense(n_out, activation='softmax')(avgpool)
# Build model
model = Model([X_in, A_in, I_in], output)
# target_tensors wires the label placeholder directly into the loss
# (TF1-style training loop rather than feeding labels via fit()).
model.compile(optimizer='adam', loss='categorical_crossentropy', target_tensors=[target])
model.summary()
# Training setup
# Grab the live TF session plus symbolic loss/accuracy tensors — presumably
# evaluated later with sess.run in a manual loop (not visible in this chunk).
sess = K.get_session()
loss = model.total_loss
acc = K.mean(categorical_accuracy(target, model.output))
# Hyperparameters inferred from the training data.
F = X_train[0].shape[-1]  # dimensionality of the node features
n_out = y_train[0].shape[-1]  # dimensionality of the targets
# Mean number of nodes per graph across the dataset, rounded up.
average_N = np.ceil(np.mean([adj.shape[-1] for adj in A_train]))
################################################################################
# BUILD MODEL
################################################################################
# TF1-style symbolic inputs: dense node features X (None x F), a sparse
# adjacency placeholder A, and integer ids per node — presumably mapping each
# node to its graph in a disjoint batch (they are later fed to GlobalAvgPool
# as segment ids); confirm against the Spektral disjoint-mode convention.
X_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, F), name='X_in'))
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)), sparse=True)
I_in = Input(tensor=tf.placeholder(tf.int32, shape=(None,), name='segment_ids_in'))
# Label placeholder (None x n_out), fed via target_tensors at compile time.
target = Input(tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target'))
# Block 1
gc1 = GraphConvSkip(n_channels,
activation=activ,
kernel_regularizer=l2(GNN_l2))([X_in, A_in])
# Pool to about half the average node count; the unused M_1 output is the
# fourth tensor returned by MinCutPool.
X_1, A_1, I_1, M_1 = MinCutPool(k=int(average_N // 2),
h=mincut_H,
activation=activ,
kernel_regularizer=l2(pool_l2))([gc1, A_in, I_in])
# Block 2
gc2 = GraphConvSkip(n_channels,
activation=activ,
kernel_regularizer=l2(GNN_l2))([X_1, A_1])
# Pool again to about a quarter of the average node count.
X_2, A_2, I_2, M_2 = MinCutPool(k=int(average_N // 4),
h=mincut_H,
activation=activ,
kernel_regularizer=l2(pool_l2))([gc2, A_1, I_1])
# NOTE(review): the lines below duplicate the placeholder/Block-1/Block-2
# construction that already appears earlier in this file. Re-executing them
# rebinds X_in/A_in/I_in/target to NEW placeholders and builds a second set of
# layers, orphaning the first. This looks like accidental duplication from a
# bad merge/extraction — confirm which copy is intended and drop the other.
X_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, F), name='X_in'))
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)), sparse=True)
I_in = Input(tensor=tf.placeholder(tf.int32, shape=(None,), name='segment_ids_in'))
target = Input(tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target'))
# Block 1
gc1 = GraphConvSkip(n_channels,
activation=activ,
kernel_regularizer=l2(GNN_l2))([X_in, A_in])
X_1, A_1, I_1, M_1 = MinCutPool(k=int(average_N // 2),
h=mincut_H,
activation=activ,
kernel_regularizer=l2(pool_l2))([gc1, A_in, I_in])
# Block 2
gc2 = GraphConvSkip(n_channels,
activation=activ,
kernel_regularizer=l2(GNN_l2))([X_1, A_1])
X_2, A_2, I_2, M_2 = MinCutPool(k=int(average_N // 4),
h=mincut_H,
activation=activ,
kernel_regularizer=l2(pool_l2))([gc2, A_1, I_1])
# Block 3
X_3 = GraphConvSkip(n_channels,
activation=activ,
kernel_regularizer=l2(GNN_l2))([X_2, A_2])
# Output block
# Per-graph average of node features (segment ids I_2), then softmax classifier.
avgpool = GlobalAvgPool()([X_3, I_2])
output = Dense(n_out, activation='softmax')(avgpool)
# Keyword arguments shared by every layer configuration below.
base_kwargs = dict(
    channels=32,
    activation='relu',
    kernel_regularizer=l2(l2_reg),
)
# NOTE(review): garbled region — the CONFIG list opened here is never closed
# before `base_kwargs` is re-assigned at the end of this span, which is a
# syntax error as written. Restore the missing closing brackets (and decide
# which `base_kwargs` value — channels 32 above vs 16 below — is intended).
# Each CONFIG entry pairs a layer class with: how many layers to stack, extra
# constructor kwargs, a preprocessing function for the adjacency matrix, and
# whether the layer consumes a sparse adjacency.
CONFIG = [
{
'layer': GraphConv,
'n_layers': neighbourhood,
'kwargs': {},
'fltr': lambda A: localpooling_filter(A),
'sparse': True
},
{
'layer': GraphConvSkip,
'n_layers': neighbourhood,
'kwargs': {},
'fltr': lambda A: localpooling_filter(A),
'sparse': True
},
{
'layer': ARMAConv,
'n_layers': 1,
'kwargs': {
'T': neighbourhood,
'K': 1,
'recurrent': True,
'dropout_rate': dropout_rate
},
# ARMA filters operate on a rescaled normalized Laplacian instead of the
# local-pooling filter used by the GCN-style layers above.
'fltr': lambda A: rescale_laplacian(normalized_laplacian(A), lmax=2),
'sparse': True
base_kwargs = {
'channels': 16,
'activation': 'relu',
'kernel_regularizer': l2(l2_reg),
}
# NOTE(review): second copy of CONFIG — nearly identical to the one above
# except the ARMAConv entry omits 'dropout_rate'. The list is not closed
# within this span (it runs into an unrelated LAYER_K_ fragment), so the
# closing `]` must be restored from the original script.
CONFIG = [
{
'layer': GraphConv,
'n_layers': neighbourhood,
'kwargs': {},
'fltr': lambda A: localpooling_filter(A),
'sparse': True
},
{
'layer': GraphConvSkip,
'n_layers': neighbourhood,
'kwargs': {},
'fltr': lambda A: localpooling_filter(A),
'sparse': True
},
{
'layer': ARMAConv,
'n_layers': 1,
'kwargs': {
'T': neighbourhood,
'K': 1,
'recurrent': True
},
'fltr': lambda A: rescale_laplacian(normalized_laplacian(A), lmax=2),
'sparse': True
},
# NOTE(review): fragment from a different configuration list (test-suite
# style: LAYER_K_/MODES_K_/KWARGS_K_ keys). The opening of the list and of
# the first entry is missing, and the final GINConv entry is cut off —
# restore the surrounding structure from the original script.
# Each entry presumably declares a layer class, the data modes it supports
# (SINGLE/BATCH/MIXED), and constructor kwargs used to instantiate it.
LAYER_K_: GraphSageConv,
MODES_K_: [SINGLE],
KWARGS_K_: {'channels': 8, 'activation': 'relu'}
},
{
LAYER_K_: EdgeConditionedConv,
MODES_K_: [SINGLE, BATCH],
KWARGS_K_: {'channels': 8, 'activation': 'relu', 'edges': True}
},
{
LAYER_K_: GraphAttention,
MODES_K_: [SINGLE, BATCH, MIXED],
KWARGS_K_: {'channels': 8, 'attn_heads': 2, 'concat_heads': False, 'activation': 'relu'}
},
{
LAYER_K_: GraphConvSkip,
MODES_K_: [SINGLE, BATCH, MIXED],
KWARGS_K_: {'channels': 8, 'activation': 'relu'}
},
{
LAYER_K_: ARMAConv,
MODES_K_: [SINGLE, BATCH, MIXED],
KWARGS_K_: {'channels': 8, 'activation': 'relu', 'order': 2, 'iterations': 2, 'share_weights': True}
},
{
LAYER_K_: APPNP,
MODES_K_: [SINGLE, BATCH, MIXED],
KWARGS_K_: {'channels': 8, 'activation': 'relu', 'mlp_hidden': [16]}
},
{
LAYER_K_: GINConv,
MODES_K_: [SINGLE],
# 3) Choose which methods to document (methods listed as strings):
# [classA, (classB, ["method1", "method2", ...]), ...]
# 4) Choose which methods to document (methods listed as qualified names):
# [classA, (classB, [module.classB.method1, module.classB.method2, ...]), ...]
PAGES = [
{
'page': 'layers/convolution.md',
'classes': [
layers.GraphConv,
layers.ChebConv,
layers.GraphSageConv,
layers.ARMAConv,
layers.EdgeConditionedConv,
layers.GraphAttention,
layers.GraphConvSkip,
layers.APPNP,
layers.GINConv
]
},
{
'page': 'layers/base.md',
'functions': [],
'methods': [],
'classes': [
layers.InnerProduct,
layers.MinkowskiProduct
]
},
{
'page': 'layers/pooling.md',
'functions': [],