# Layer test configuration: one entry per convolutional layer, listing the
# data modes in which the layer is tested and the keyword arguments used to
# instantiate it. The original snippet starts mid-list, so the list name, the
# key constants, and the mode flag values below are assumptions added to
# close the structure.
from spektral.layers import (ARMAConv, ChebConv, EdgeConditionedConv,
                             GraphAttention, GraphConv, GraphConvSkip,
                             GraphSageConv)

LAYER_K_, MODES_K_, KWARGS_K_ = 'layer', 'modes', 'kwargs'
SINGLE, BATCH, MIXED = 'single', 'batch', 'mixed'

TESTS = [
    {
        LAYER_K_: GraphConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: ChebConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: GraphSageConv,
        MODES_K_: [SINGLE],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: EdgeConditionedConv,
        MODES_K_: [SINGLE, BATCH],
        KWARGS_K_: {'channels': 8, 'activation': 'relu', 'edges': True}
    },
    {
        LAYER_K_: GraphAttention,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'attn_heads': 2, 'concat_heads': False, 'activation': 'relu'}
    },
    {
        LAYER_K_: GraphConvSkip,
        MODES_K_: [SINGLE, BATCH, MIXED],
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    },
    {
        LAYER_K_: ARMAConv,
        MODES_K_: [SINGLE, BATCH, MIXED],
        # The source cuts off here; kwargs assumed to follow the same pattern.
        KWARGS_K_: {'channels': 8, 'activation': 'relu'}
    }
]
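
# A sketch of how the table above could drive a smoke test: instantiate every
# layer with its declared kwargs. This loop is an assumption, not part of the
# original source; the 'edges' entry is treated as a harness flag marking
# layers that need edge features, rather than a constructor argument.
for config in TESTS:
    layer_cls = config[LAYER_K_]
    kwargs = dict(config[KWARGS_K_])
    kwargs.pop('edges', None)    # harness flag, not a constructor argument
    layer = layer_cls(**kwargs)  # should not raise for any listed layer
    print('{} instantiates in modes {}'.format(layer_cls.__name__,
                                               config[MODES_K_]))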

# Example: graph-level regression with EdgeConditionedConv in batch mode,
# where all graphs are zero-padded to N nodes and fed as dense tensors.
# A, X, E, y and the dimensions N, F, S, n_out come from a dataset-loading
# step that is not part of this snippet; learning_rate is assumed, since the
# snippet does not define it.
from keras.callbacks import EarlyStopping
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from spektral.layers import EdgeConditionedConv, GlobalAvgPool

learning_rate = 1e-3  # Learning rate (assumed)
epochs = 25           # Number of training epochs
batch_size = 32       # Batch size
es_patience = 5       # Patience for early stopping

# Train/test split
A_train, A_test, \
X_train, X_test, \
E_train, E_test, \
y_train, y_test = train_test_split(A, X, E, y, test_size=0.1)

# Model definition
X_in = Input(shape=(N, F))     # Node features
A_in = Input(shape=(N, N))     # Adjacency matrices
E_in = Input(shape=(N, N, S))  # Edge features
gc1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
gc2 = EdgeConditionedConv(32, activation='relu')([gc1, A_in, E_in])
pool = GlobalAvgPool()(gc2)
output = Dense(n_out)(pool)

# Build model
model = Model(inputs=[X_in, A_in, E_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse')
model.summary()

# Train model (the callbacks list is truncated in the source; EarlyStopping
# is implied by es_patience above)
model.fit([X_train, A_train, E_train],
          y_train,
          batch_size=batch_size,
          validation_split=0.1,
          epochs=epochs,
          callbacks=[
              EarlyStopping(patience=es_patience)
          ])
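
# The test split created above is otherwise unused in the snippet; evaluating
# on it is a natural follow-up (an assumption, not part of the original code):
test_loss = model.evaluate([X_test, A_test, E_test],
                           y_test,
                           batch_size=batch_size)
print('Test MSE: {:.4f}'.format(test_loss))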

# Example: the same ECC regression in disjoint (graph batch) mode, where the
# graphs in a batch are merged into one block-diagonal graph and I_in gives
# the index of the graph each node belongs to. Training runs through a raw
# TF1 session, so the target is a placeholder wired in as a Keras input.
# learning_rate and epochs are assumed, since the snippet defines only
# batch_size.
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from spektral.layers import EdgeConditionedConv, GlobalAvgPool

learning_rate = 1e-3  # Learning rate (assumed)
epochs = 25           # Number of training epochs (assumed)
batch_size = 32       # Batch size

# Train/test split
A_train, A_test, \
X_train, X_test, \
E_train, E_test, \
y_train, y_test = train_test_split(A, X, E, y, test_size=0.1)

# Model definition
X_in = Input(batch_shape=(None, F))        # Node features
A_in = Input(batch_shape=(None, None))     # Block-diagonal adjacency
E_in = Input(batch_shape=(None, None, S))  # Edge features
I_in = Input(batch_shape=(None, ), dtype='int64')  # Graph index of each node
target = Input(tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target'))
gc1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
gc2 = EdgeConditionedConv(32, activation='relu')([gc1, A_in, E_in])
pool = GlobalAvgPool()([gc2, I_in])
output = Dense(n_out)(pool)

# Build model
model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse', target_tensors=target)
model.summary()

# Training setup: minimize the model's total loss with a TF1 optimizer
sess = K.get_session()
loss = model.total_loss
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_step = opt.minimize(loss)
init_op = tf.global_variables_initializer()
sess.run(init_op)
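
# The snippet stops after building train_step; a minimal training loop
# (an assumption, not from the original source) feeds the merged inputs and
# the target placeholder. I_train, the per-node graph indices for the
# training set, is hypothetical here and would come from whatever routine
# builds the block-diagonal batch.
for epoch in range(epochs):
    _, loss_value = sess.run([train_step, loss],
                             feed_dict={X_in: X_train,
                                        A_in: A_train,
                                        E_in: E_train,
                                        I_in: I_train,
                                        target: y_train})
    print('Epoch {}: loss = {:.4f}'.format(epoch, loss_value))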

# Documentation auto-generation config. For each class to document, it is
# possible to:
# 1) Document only the class: [classA, classB, ...]
# 2) Document all its methods: [classA, (classB, "*")]
# 3) Choose which methods to document (methods listed as strings):
#    [classA, (classB, ["method1", "method2", ...]), ...]
# 4) Choose which methods to document (methods listed as qualified names):
#    [classA, (classB, [module.classB.method1, module.classB.method2, ...]), ...]
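#
# For example, a hypothetical entry combining options 1 and 3 (the page path
# and class names below are placeholders, not taken from this config):
#
# {
#     'page': 'layers/example.md',
#     'classes': [
#         layers.ClassA,                           # option 1: class only
#         (layers.ClassB, ["method1", "method2"])  # option 3: chosen methods
#     ]
# },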
PAGES = [
    {
        'page': 'layers/convolution.md',
        'classes': [
            layers.GraphConv,
            layers.ChebConv,
            layers.GraphSageConv,
            layers.ARMAConv,
            layers.EdgeConditionedConv,
            layers.GraphAttention,
            layers.GraphConvSkip,
            layers.APPNP,
            layers.GINConv
        ]
    },
    {
        'page': 'layers/base.md',
        'functions': [],
        'methods': [],
        'classes': [
            layers.InnerProduct,
            layers.MinkowskiProduct
        ]
    },
    {