How to use the pgl.graph_wrapper module in pgl

To help you get started, we've selected a few pgl examples based on popular ways pgl.graph_wrapper is used in public projects. Each snippet builds a pgl.graph_wrapper.GraphWrapper, the feed target that PGL 1.x static-graph models are written against.

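All of the examples below follow the same skeleton: construct a pgl.graph_wrapper.GraphWrapper inside a fluid program, build layers on top of its node features, then feed a concrete graph through it at run time. Here is a minimal, self-contained sketch of that pattern against the PGL 1.x static-graph API; the toy graph, sizes, and names are illustrative and not taken from any of the examples.

import numpy as np
import paddle.fluid as fluid
import pgl

# Build a small in-memory graph with one float feature per node.
graph = pgl.graph.Graph(
    num_nodes=4,
    edges=[(0, 1), (1, 2), (2, 3)],
    node_feat={"words": np.random.rand(4, 16).astype("float32")})

place = fluid.CPUPlace()
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
    # GraphWrapper creates feed targets for the graph structure and features.
    gw = pgl.graph_wrapper.GraphWrapper(
        name="graph", place=place, node_feat=graph.node_feat_info())
    output = pgl.layers.gcn(
        gw, gw.node_feat["words"], 8, activation="relu", name="gcn")

exe = fluid.Executor(place)
exe.run(startup_program)
# to_feed() turns the concrete graph into the wrapper's feed dict.
out, = exe.run(main_program, feed=gw.to_feed(graph), fetch_list=[output])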

github PaddlePaddle / PGL / examples / unsup_graphsage / train.py
def build_graph_model(args):
    """build_graph_model"""
    node_feature_info = [('index', [None], np.dtype('int64'))]

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    graph_wrappers = []
    feed_list = []

    graph_wrappers.append(
        pgl.graph_wrapper.GraphWrapper(
            "layer_0", fluid.CPUPlace(), node_feat=node_feature_info))
    #edge_feat=[("f", [None, 1], "float32")]))

    num_embed = args.num_nodes

    num_layers = args.num_layers

    src_index = fluid.layers.data(
        "src_index", shape=[None], dtype="int64", append_batch_size=False)

    dst_index = fluid.layers.data(
        "dst_index", shape=[None], dtype="int64", append_batch_size=False)

    feature = fluid.layers.embedding(
        input=fluid.layers.reshape(graph_wrappers[0].node_feat['index'],
                                   [-1, 1]),
        # ... the call (and the rest of the function) is truncated in the source listing
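The listing breaks off inside the embedding lookup: the wrapper's integer 'index' feature selects rows of a trainable node-embedding table, while src_index and dst_index are ordinary data layers fed alongside the graph. A hedged sketch of how such a program is fed at run time (subgraph, src_ids, dst_ids, exe, and loss are assumptions, not defined in the snippet):

# Hypothetical feeding step: the graph enters through the wrapper,
# the edge endpoints through the extra data layers.
feed_dict = graph_wrappers[0].to_feed(subgraph)   # subgraph: a pgl.graph.Graph
feed_dict["src_index"] = src_ids.astype("int64")  # assumed numpy id arrays
feed_dict["dst_index"] = dst_ids.astype("int64")
loss_val, = exe.run(train_program, feed=feed_dict, fetch_list=[loss])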
github PaddlePaddle / PGL / examples / gcn / train.py
    dataset = load(args.dataset)

    # normalize
    indegree = dataset.graph.indegree()
    norm = np.zeros_like(indegree, dtype="float32")
    norm[indegree > 0] = np.power(indegree[indegree > 0], -0.5)
    dataset.graph.node_feat["norm"] = np.expand_dims(norm, -1)

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 16

    with fluid.program_guard(train_program, startup_program):
        gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            place=place,
            node_feat=dataset.graph.node_feat_info())

        output = pgl.layers.gcn(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="relu",
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_1")
        output = fluid.layers.dropout(
            output, 0.5, dropout_implementation='upscale_in_train')
        output = pgl.layers.gcn(gw,
                                output,
                                dataset.num_classes,
                                activation=None,
                                # ... truncated in the source listing
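The second gcn call is cut off in the listing; in a node-classification setup it emits dataset.num_classes logits, typically followed by a gather over the labeled nodes and a cross-entropy loss. A hedged sketch of that tail (node_index and node_label are illustrative names, not part of the snippet):

# Hypothetical loss head for the two-layer GCN above.
node_index = fluid.layers.data(
    "node_index", shape=[None], dtype="int64", append_batch_size=False)
node_label = fluid.layers.data(
    "node_label", shape=[None, 1], dtype="int64", append_batch_size=False)
pred = fluid.layers.gather(output, node_index)  # logits of the labeled nodes
loss = fluid.layers.softmax_with_cross_entropy(logits=pred, label=node_label)
loss = fluid.layers.mean(loss)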
github PaddlePaddle / PGL / examples / distribute_graphsage / train.py
    log.info("preprocess finish")
    log.info("Train Examples: %s" % len(data["train_index"]))
    log.info("Val Examples: %s" % len(data["val_index"]))
    log.info("Test Examples: %s" % len(data["test_index"]))

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    samples = []
    if args.samples_1 > 0:
        samples.append(args.samples_1)
    if args.samples_2 > 0:
        samples.append(args.samples_2)

    with fluid.program_guard(train_program, startup_program):
        graph_wrapper = pgl.graph_wrapper.GraphWrapper(
            "sub_graph", fluid.CPUPlace(), node_feat=[('feats', [None, 602], np.dtype('float32'))])
        model_loss, model_acc = build_graph_model(
            graph_wrapper,
            num_class=data["num_class"],
            hidden_size=args.hidden_size,
            graphsage_type=args.graphsage_type,
            k_hop=len(samples))

    test_program = train_program.clone(for_test=True)

    with fluid.program_guard(train_program, startup_program):
        adam = fluid.optimizer.Adam(learning_rate=args.lr)
        adam.minimize(model_loss)

    exe = fluid.Executor(place)
    exe.run(startup_program)
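With the programs built and initialized, training reduces to feeding sampled subgraphs through the wrapper. A hedged sketch of one epoch (train_iter and what it yields are assumptions; the real example wires this up through a reader pipeline):

# Hypothetical training loop for the distributed GraphSAGE snippet.
for subgraph, extra_feed in train_iter():        # assumed data pipeline
    feed_dict = graph_wrapper.to_feed(subgraph)  # graph structure + 'feats'
    feed_dict.update(extra_feed)                 # e.g. node indices and labels
    loss_val, acc_val = exe.run(train_program,
                                feed=feed_dict,
                                fetch_list=[model_loss, model_acc])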
github PaddlePaddle / PGL / examples / gat / train.py
def main(args):
    dataset = load(args.dataset)
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 8

    with fluid.program_guard(train_program, startup_program):
        gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            place=place,
            node_feat=dataset.graph.node_feat_info())

        output = pgl.layers.gat(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="elu",
                                name="gat_layer_1",
                                num_heads=8,
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
        output = pgl.layers.gat(gw,
                                output,
                                dataset.num_classes,
                                # ... truncated in the source listing
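The second gat call is truncated; GAT output layers conventionally collapse to a single head with no activation so the result can feed a softmax directly. A hedged completion (the keyword values mirror the first layer and are assumptions, not taken from the listing):

# Hypothetical completion of the truncated output layer.
output = pgl.layers.gat(gw,
                        output,
                        dataset.num_classes,
                        num_heads=1,
                        activation=None,
                        name="gat_layer_2",
                        feat_drop=0.6,
                        attn_drop=0.6,
                        is_test=False)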
github PaddlePaddle / PGL / examples / graphsage / train.py
    train_program = fluid.Program()
    startup_program = fluid.Program()
    samples = []
    if args.samples_1 > 0:
        samples.append(args.samples_1)
    if args.samples_2 > 0:
        samples.append(args.samples_2)

    with fluid.program_guard(train_program, startup_program):
        feature, feature_init = paddle_helper.constant(
            "feat",
            dtype=data['feature'].dtype,
            value=data['feature'],
            hide_batch_size=False)

        graph_wrapper = pgl.graph_wrapper.GraphWrapper(
            "sub_graph",
            fluid.CPUPlace(),
            node_feat=data['graph'].node_feat_info())
        model_loss, model_acc = build_graph_model(
            graph_wrapper,
            num_class=data["num_class"],
            feature=feature,
            hidden_size=args.hidden_size,
            graphsage_type=args.graphsage_type,
            k_hop=len(samples))

    test_program = train_program.clone(for_test=True)

    with fluid.program_guard(train_program, startup_program):
        adam = fluid.optimizer.Adam(learning_rate=args.lr)
        adam.minimize(model_loss)
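Unlike the other snippets, this one stores the dense feature matrix in the program itself via paddle_helper.constant, which returns both the tensor and an initializer; the initializer has to run once after the startup program so the values are actually written. A hedged sketch of that step (the Executor setup here is an assumption, not shown in the listing):

# Hypothetical continuation: materialize the constant feature tensor.
exe = fluid.Executor(place)
exe.run(startup_program)
feature_init(place)  # copies data['feature'] into the "feat" tensor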