# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
import numpy as np
import networkx as nx
import scipy.sparse as sp
from sklearn import preprocessing
from .. import BaseModel, register_model
@register_model("hope")
class HOPE(BaseModel):
@staticmethod
def add_args(parser):
    """Register HOPE's model-specific command-line arguments on *parser*."""
    # fmt: off
    parser.add_argument(
        '--beta',
        type=float,
        default=0.01,
        help='Parameter of katz for HOPE. Default is 0.01',
    )
    # fmt: on
@classmethod
def build_model_from_args(cls, args):
    """Construct a HOPE model from parsed command-line arguments."""
    dimension, beta = args.hidden_size, args.beta
    return cls(dimension, beta)
def __init__(self, dimension, beta):
super(HOPE, self).__init__()
self.dimension = dimension
import numpy as np
import networkx as nx
from gensim.models import Word2Vec, KeyedVectors
import random
from .. import BaseModel, register_model
@register_model("deepwalk")
class DeepWalk(BaseModel):
@staticmethod
def add_args(parser):
    """Register DeepWalk's model-specific command-line arguments on *parser*."""
    # fmt: off
    # All options share type=int, so register them table-driven.
    int_options = (
        ('--walk-length', 80, 'Length of walk per source. Default is 80.'),
        ('--walk-num', 40, 'Number of walks per source. Default is 40.'),
        ('--window-size', 5, 'Window size of skip-gram model. Default is 5.'),
        ('--worker', 10, 'Number of parallel workers. Default is 10.'),
        ('--iteration', 10, 'Number of iterations. Default is 10.'),
    )
    for flag, default, help_text in int_options:
        parser.add_argument(flag, type=int, default=default, help=help_text)
    # fmt: on
import networkx as nx
import numpy as np
import scipy.sparse as sp
from .. import BaseModel, register_model
@register_model("netmf")
class NetMF(BaseModel):
@staticmethod
def add_args(parser):
    """Register NetMF's model-specific command-line arguments on *parser*."""
    # fmt: off
    for flag, default in (("--window-size", 5), ("--rank", 256), ("--negative", 1)):
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--is-large', action='store_true')
    # fmt: on
@classmethod
def build_model_from_args(cls, args):
    """Construct a NetMF model from parsed command-line arguments."""
    params = (
        args.hidden_size,
        args.window_size,
        args.rank,
        args.negative,
        args.is_large,
    )
    return cls(*params)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn.conv import GATConv
from cogdl.layers import SELayer
from .. import BaseModel, register_model
@register_model("drgat")
class DrGAT(BaseModel):
@staticmethod
def add_args(parser):
    """Register DrGAT's model-specific command-line arguments on *parser*."""
    # fmt: off
    # num-features / num-classes have no default: they are filled in from the dataset.
    parser.add_argument("--num-features", type=int)
    parser.add_argument("--num-classes", type=int)
    for flag, default in (("--hidden-size", 8), ("--num-heads", 8)):
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument("--dropout", type=float, default=0.6)
    # fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
args.num_features,
import numpy as np
import networkx as nx
from sklearn import preprocessing
import time
from tqdm import tqdm
from .. import BaseModel, register_model, alias_draw, alias_setup
@register_model("line")
class LINE(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--walk-length', type=int, default=80,
help='Length of walk per source. Default is 50.')
parser.add_argument('--walk-num', type=int, default=40,
help='Number of walks per source. Default is 20.')
parser.add_argument('--negative', type=int, default=5,
help='Number of negative node in sampling. Default is 5.')
parser.add_argument('--batch-size', type=int, default=1000,
help='Batch size in SGD training process. Default is 1000.')
parser.add_argument('--alpha', type=float, default=0.025,
help='Initial learning rate of SGD. Default is 0.025.')
parser.add_argument('--order', type=int, default=3,
import time
import networkx as nx
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy.special import iv
from sklearn import preprocessing
from sklearn.utils.extmath import randomized_svd
from .. import BaseModel, register_model
@register_model("prone")
class ProNE(BaseModel):
@staticmethod
def add_args(parser):
    """Register ProNE's model-specific command-line arguments on *parser*."""
    # fmt: off
    # Fixed: the help text had a stray leading space and lowercase "chebyshev".
    parser.add_argument("--step", type=int, default=5,
                        help="Number of items in the Chebyshev expansion")
    # NOTE(review): --mu and --theta are undocumented here; presumably the
    # spectral-propagation hyper-parameters of ProNE — confirm before adding help text.
    parser.add_argument("--mu", type=float, default=0.2)
    parser.add_argument("--theta", type=float, default=0.5)
    # fmt: on
@classmethod
def build_model_from_args(cls, args):
    """Construct a ProNE model from parsed command-line arguments."""
    hyper_params = (args.step, args.mu, args.theta)
    return cls(args.hidden_size, *hyper_params)
def __init__(self, dimension, step, mu, theta):
import numpy as np
import networkx as nx
from gensim.models import Word2Vec, KeyedVectors
import random
import time
from .. import BaseModel, register_model, alias_draw, alias_setup
@register_model("node2vec")
class Node2vec(BaseModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--walk-length', type=int, default=80,
help='Length of walk per source. Default is 80.')
parser.add_argument('--walk-num', type=int, default=40,
help='Number of walks per source. Default is 40.')
parser.add_argument('--window-size', type=int, default=5,
help='Window size of skip-gram model. Default is 5.')
parser.add_argument('--worker', type=int, default=10,
help='Number of parallel workers. Default is 10.')
parser.add_argument('--iteration', type=int, default=10,
help='Number of iterations. Default is 10.')
parser.add_argument('--p', type=float, default=1.0,
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn.conv import GCNConv
from cogdl.layers import SELayer
from .. import BaseModel, register_model
@register_model("drgcn")
class DrGCN(BaseModel):
@staticmethod
def add_args(parser):
    """Register DrGCN's model-specific command-line arguments on *parser*."""
    # fmt: off
    # num-features / num-classes have no default: they are filled in from the dataset.
    parser.add_argument("--num-features", type=int)
    parser.add_argument("--num-classes", type=int)
    for flag, default in (("--hidden-size", 16), ("--num-layers", 2)):
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument("--dropout", type=float, default=0.5)
    # fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
args.num_features,
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn.conv import GATConv
from .. import BaseModel, register_model
@register_model("pyg_gat")
class GAT(BaseModel):
@staticmethod
def add_args(parser):
    """Register GAT's model-specific command-line arguments on *parser*."""
    # fmt: off
    # num-features / num-classes have no default: they are filled in from the dataset.
    parser.add_argument("--num-features", type=int)
    parser.add_argument("--num-classes", type=int)
    for flag, default in (("--hidden-size", 8), ("--num-heads", 8)):
        parser.add_argument(flag, type=int, default=default)
    for flag, default in (("--dropout", 0.6), ("--lr", 0.005)):
        parser.add_argument(flag, type=float, default=default)
    # fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
for i, attention in enumerate(self.attentions):
self.add_module("attention_{}".format(i), attention)
self.out_att = GraphAttentionLayer(
nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False
)
def forward(self, x, adj):
    """Multi-head attention forward pass.

    Applies dropout to the input, concatenates every attention head's
    output along the feature dimension, applies dropout again, feeds the
    result through the output attention layer with an ELU, and returns
    per-node log-probabilities.
    """
    h = F.dropout(x, self.dropout, training=self.training)
    head_outputs = [attention(h, adj) for attention in self.attentions]
    h = torch.cat(head_outputs, dim=1)
    h = F.dropout(h, self.dropout, training=self.training)
    h = F.elu(self.out_att(h, adj))
    return F.log_softmax(h, dim=1)
@register_model("gat")
class PetarVSpGAT(PetarVGAT):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
"""Sparse version of GAT."""
BaseModel.__init__(self)
self.dropout = dropout
self.attentions = [
SpGraphAttentionLayer(
nfeat, nhid, dropout=dropout, alpha=alpha, concat=True
)
for _ in range(nheads)
]
for i, attention in enumerate(self.attentions):
self.add_module("attention_{}".format(i), attention)
self.out_att = SpGraphAttentionLayer(