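# The snippets below build two-layer GAT and GCN node-classification models with PGL on PaddlePaddle Fluid.
# Test node ids and labels are reshaped to [N, 1] columns so they match the int64 data layers defined later.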
test_index = dataset.test_index
test_label = np.expand_dims(dataset.y[test_index], -1)
test_index = np.expand_dims(test_index, -1)
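# Choose CPU or GPU execution and create separate programs for training, startup (parameter init), and testing.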
place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
train_program = fluid.Program()
startup_program = fluid.Program()
test_program = fluid.Program()
hidden_size = 16
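# Two-layer GAT in the train program, built on a StaticGraphWrapper that embeds the fixed graph in the program:
# 8 ELU attention heads in the first layer, a single head producing class logits in the second.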
with fluid.program_guard(train_program, startup_program):
    gw = pgl.graph_wrapper.StaticGraphWrapper(
        name="graph", graph=dataset.graph, place=place)
    output = pgl.layers.gat(gw,
                            gw.node_feat["words"],
                            hidden_size,
                            activation="elu",
                            name="gat_layer_1",
                            num_heads=8,
                            feat_drop=0.6,
                            attn_drop=0.6,
                            is_test=False)
    output = pgl.layers.gat(gw,
                            output,
                            dataset.num_classes,
                            num_heads=1,
                            activation=None,
                            name="gat_layer_2",
                            feat_drop=0.6,
                            attn_drop=0.6,
                            is_test=False)
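# The same two-layer GAT, but on a GraphWrapper whose node features are declared from node_feat_info() and fed at run time.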
with fluid.program_guard(train_program, startup_program):
    gw = pgl.graph_wrapper.GraphWrapper(
        name="graph",
        place=place,
        node_feat=dataset.graph.node_feat_info())
    output = pgl.layers.gat(gw,
                            gw.node_feat["words"],
                            hidden_size,
                            activation="elu",
                            name="gat_layer_1",
                            num_heads=8,
                            feat_drop=0.6,
                            attn_drop=0.6,
                            is_test=False)
    output = pgl.layers.gat(gw,
                            output,
                            dataset.num_classes,
                            num_heads=1,
                            activation=None,
                            name="gat_layer_2",
                            feat_drop=0.6,
                            attn_drop=0.6,
                            is_test=False)
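    # Data layers (placeholders) for the ids of the nodes to score and their ground-truth labels.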
    node_index = fluid.layers.data(
        "node_index",
        shape=[None, 1],
        dtype="int64",
        append_batch_size=False)
    node_label = fluid.layers.data(
        "node_label",
        shape=[None, 1],
        dtype="int64",
        append_batch_size=False)
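# StaticGraphWrapper variant of the GAT model; the train program is then cloned with for_test=True for validation and testing.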
hidden_size = 16
with fluid.program_guard(train_program, startup_program):
    gw = pgl.graph_wrapper.StaticGraphWrapper(
        name="graph", graph=dataset.graph, place=place)
    output = pgl.layers.gat(gw,
                            gw.node_feat["words"],
                            hidden_size,
                            activation="elu",
                            name="gat_layer_1",
                            num_heads=8,
                            feat_drop=0.6,
                            attn_drop=0.6,
                            is_test=False)
    output = pgl.layers.gat(gw,
                            output,
                            dataset.num_classes,
                            num_heads=1,
                            activation=None,
                            name="gat_layer_2",
                            feat_drop=0.6,
                            attn_drop=0.6,
                            is_test=False)
val_program = train_program.clone(for_test=True)
test_program = train_program.clone(for_test=True)
initializer = []
with fluid.program_guard(train_program, startup_program):
    train_node_index, init = paddle_helper.constant(
        "train_node_index", dtype="int64", value=train_index)
def main(args):
    dataset = load(args.dataset)

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 8

    with fluid.program_guard(train_program, startup_program):
        gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            place=place,
            node_feat=dataset.graph.node_feat_info())
        output = pgl.layers.gat(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="elu",
                                name="gat_layer_1",
                                num_heads=8,
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
        output = pgl.layers.gat(gw,
                                output,
                                dataset.num_classes,
                                num_heads=1,
                                activation=None,
                                name="gat_layer_2",
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
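# Two-layer GCN on a GraphWrapper: ReLU then dropout after the first layer, raw class logits from the second;
# both layers use the precomputed symmetric "norm" node feature.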
with fluid.program_guard(train_program, startup_program):
    gw = pgl.graph_wrapper.GraphWrapper(
        name="graph",
        place=place,
        node_feat=dataset.graph.node_feat_info())
    output = pgl.layers.gcn(gw,
                            gw.node_feat["words"],
                            hidden_size,
                            activation="relu",
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_1")
    output = fluid.layers.dropout(
        output, 0.5, dropout_implementation='upscale_in_train')
    output = pgl.layers.gcn(gw,
                            output,
                            dataset.num_classes,
                            activation=None,
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_2")
    node_index = fluid.layers.data(
        "node_index",
        shape=[None, 1],
        dtype="int64",
        append_batch_size=False)
    node_label = fluid.layers.data(
        "node_label",
        shape=[None, 1],
        dtype="int64",
        append_batch_size=False)
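    # Not part of the original snippet: a minimal sketch, using the standard Fluid API,
    # of how these placeholders are typically wired to a softmax cross-entropy loss.
    pred = fluid.layers.gather(output, node_index)            # logits of the fed node ids
    loss, pred = fluid.layers.softmax_with_cross_entropy(
        logits=pred, label=node_label, return_softmax=True)   # per-node loss + softmax probabilities
    acc = fluid.layers.accuracy(input=pred, label=node_label, k=1)
    loss = fluid.layers.mean(loss)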
val_index = np.expand_dims(val_index, -1)
test_index = dataset.test_index
test_label = np.expand_dims(dataset.y[test_index], -1)
test_index = np.expand_dims(test_index, -1)
place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
train_program = fluid.Program()
startup_program = fluid.Program()
test_program = fluid.Program()
hidden_size = 16
with fluid.program_guard(train_program, startup_program):
    gw = pgl.graph_wrapper.StaticGraphWrapper(
        name="graph", graph=dataset.graph, place=place)
    output = pgl.layers.gcn(gw,
                            gw.node_feat["words"],
                            hidden_size,
                            activation="relu",
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_1")
    output = fluid.layers.dropout(
        output, 0.5, dropout_implementation='upscale_in_train')
    output = pgl.layers.gcn(gw,
                            output,
                            dataset.num_classes,
                            activation=None,
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_2")
val_program = train_program.clone(for_test=True)
test_program = train_program.clone(for_test=True)
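# Symmetric GCN normalization: nodes with nonzero in-degree get indegree^(-0.5), stored as the node feature "norm".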
norm[indegree > 0] = np.power(indegree[indegree > 0], -0.5)
dataset.graph.node_feat["norm"] = np.expand_dims(norm, -1)
place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
train_program = fluid.Program()
startup_program = fluid.Program()
test_program = fluid.Program()
hidden_size = 16
with fluid.program_guard(train_program, startup_program):
    gw = pgl.graph_wrapper.GraphWrapper(
        name="graph",
        place=place,
        node_feat=dataset.graph.node_feat_info())
    output = pgl.layers.gcn(gw,
                            gw.node_feat["words"],
                            hidden_size,
                            activation="relu",
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_1")
    output = fluid.layers.dropout(
        output, 0.5, dropout_implementation='upscale_in_train')
    output = pgl.layers.gcn(gw,
                            output,
                            dataset.num_classes,
                            activation=None,
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_2")
    node_index = fluid.layers.data(
        "node_index",
        shape=[None, 1],
        dtype="int64",
        append_batch_size=False)
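# StaticGraphWrapper variant of the GCN model.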
startup_program = fluid.Program()
test_program = fluid.Program()
hidden_size = 16
with fluid.program_guard(train_program, startup_program):
    gw = pgl.graph_wrapper.StaticGraphWrapper(
        name="graph", graph=dataset.graph, place=place)
    output = pgl.layers.gcn(gw,
                            gw.node_feat["words"],
                            hidden_size,
                            activation="relu",
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_1")
    output = fluid.layers.dropout(
        output, 0.5, dropout_implementation='upscale_in_train')
    output = pgl.layers.gcn(gw,
                            output,
                            dataset.num_classes,
                            activation=None,
                            norm=gw.node_feat['norm'],
                            name="gcn_layer_2")
val_program = train_program.clone(for_test=True)
test_program = train_program.clone(for_test=True)
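# Training node ids (and labels) are embedded in the program as constants via paddle_helper.constant;
# each returned init hook is collected in `initializer` to be run later.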
initializer = []
with fluid.program_guard(train_program, startup_program):
    train_node_index, init = paddle_helper.constant(
        "train_node_index", dtype="int64", value=train_index)
    initializer.append(init)
    train_node_label, init = paddle_helper.constant(