# Scraper artifact (Snyk web-page banner), kept as a comment so the file parses:
# "Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately."
def test_from_igraph_weighted():
    """Round-trip a random weighted adjacency matrix through igraph.

    Builds a symmetric weighted adjacency matrix with random edge weights,
    loads it once via ``graphtools.from_igraph`` and once directly as a
    precomputed adjacency, and asserts both yield the same kernel matrix.
    """
    n_vertices, n_edges = 100, 500
    weights = np.zeros((n_vertices, n_vertices))
    for _ in range(n_edges):
        u, v = np.random.choice(n_vertices, 2, replace=False)
        w = np.random.uniform(0, 1)
        weights[u, v] = w
        weights[v, u] = w
    ig_graph = igraph.Graph.Weighted_Adjacency(weights.tolist())
    graph_from_ig = graphtools.from_igraph(ig_graph)
    graph_from_adj = graphtools.Graph(weights, precomputed="adjacency")
    assert np.all(graph_from_ig.K == graph_from_adj.K)
def is_dag(W):
    """Return True iff the weighted adjacency matrix ``W`` encodes a DAG."""
    return ig.Graph.Weighted_Adjacency(W.tolist()).is_dag()
def getGraPrps(seq):
    """Collect graph-theoretic properties of a folded sequence.

    NOTE(review): this fragment appears truncated -- no return statement is
    visible, so as shown the function returns None. ``FOLDER`` and
    ``stc2adj`` are defined outside this chunk; presumably ``FOLDER.fold``
    predicts a structure plus energies and a base-pair probability matrix
    ``bpp`` -- confirm against the defining module.
    NOTE(review): uses ``xrange``, so this fragment is Python 2 code.
    """
    prps = []
    # Assumed unpacking: structure, MFE, ensemble MFE, centroid structure /
    # MFE / distance, frequency, diversity, base-pair probability matrix
    # -- TODO confirm FOLDER.fold's actual return signature.
    stc, mfe, emfe, cstc, cmfe, cdst, frq, div, bpp = FOLDER.fold(seq)
    # Graph features
    # Adjacency matrix of the predicted ("hard") structure.
    adj = stc2adj(stc)
    # Add backbone edges (consecutive bases) to the probability matrix so the
    # "soft" graph is connected along the chain as well as by base pairs.
    for i in xrange(1, len(seq)):
        bpp[i-1, i] = 1
        bpp[i, i-1] = 1
    # Hard graph from the single predicted structure, soft graph from the
    # base-pair probabilities; both matrices are read as upper-triangular.
    gra_h = igraph.Graph.Weighted_Adjacency(adj.tolist(), mode=igraph.ADJ_UPPER)
    gra_s = igraph.Graph.Weighted_Adjacency(bpp.tolist(), mode=igraph.ADJ_UPPER)
    prps.append(len(gra_h.articulation_points()))  # Number of articulation points (hard)
    prps.append(len(gra_s.articulation_points()))  # Number of articulation points (soft)
    prps.append(gra_h.average_path_length())  # Average shortest path length (hard)
    prps.append(gra_s.average_path_length())  # Average shortest path length (soft)
    a = gra_h.betweenness(directed=False)
    prps.append(numpy.mean(a))  # Average vertex betweenness (hard)
    # Linear relation with path length (hard)
    prps.append(numpy.std(a))  # Std dev. vertex betweenness (hard)
    # Linear relation with edge betweeness (hard)
    a = gra_s.betweenness(directed=False)
    prps.append(numpy.mean(a))  # Average vertex betweenness (soft)
    # Linear relation with path length (soft)
# NOTE(review): this fragment starts mid-function -- show_edge_labels,
# adjacency, adjacency_matrix, test_format, node_labels, InteractiveClass,
# draw and kwargs all come from the enclosing (unseen) scope, and the bare
# `return` statements only make sense inside that function.
if show_edge_labels:
    # Label each edge with its index in the adjacency edge list.
    edge_labels = {(edge[0], edge[1]): str(int(ii)) for ii, edge in enumerate(adjacency)}
else:
    edge_labels = None
# Convert the adjacency data into the requested graph representation.
if test_format == "sparse":
    graph = adjacency
elif test_format == "dense":
    graph = adjacency_matrix
elif test_format == "networkx":
    import networkx
    graph = networkx.DiGraph(adjacency_matrix)
elif test_format == "igraph":
    import igraph
    graph = igraph.Graph.Weighted_Adjacency(adjacency_matrix.tolist())
# NOTE(review): no fallback branch -- an unrecognised test_format would leave
# `graph` unbound and raise NameError below; confirm callers restrict values.
if not InteractiveClass:
    return draw(graph, node_labels=node_labels, edge_labels=edge_labels, **kwargs)
else:
    return InteractiveClass(graph, node_labels=node_labels, edge_labels=edge_labels, **kwargs)
def read(self):
    """Load the co-authorship matrix and return it as an igraph graph.

    Reads the Matrix Market file named by ``self.coauthorMatrixFilename``,
    builds an undirected weighted graph ("PLUS" mode, self-loops dropped),
    prints its summary, merges parallel edges by summing their weights, and
    attaches an ``invWeight`` edge attribute (1/weight) for distance use.
    """
    coauthor_matrix = scipy.io.mmread(self.coauthorMatrixFilename)
    graph = igraph.Graph.Weighted_Adjacency(coauthor_matrix, mode="PLUS", loops=False)
    print(graph.summary())
    graph.simplify(combine_edges=sum)
    inverse_weights = 1.0 / numpy.array(graph.es["weight"])
    graph.es["invWeight"] = inverse_weights
    return graph
def calcFtrs(seq):
    """Compute graph features from the base-pair probability matrix of ``seq``.

    NOTE(review): this fragment appears truncated -- no return statement is
    visible, so as shown the function returns None. ``FOLDER`` is defined
    outside this chunk; presumably ``FOLDER.fold`` returns structure,
    energies and the base-pair probability matrix ``bpp`` -- confirm.
    """
    ftrs = []
    # Assumed unpacking: structure, MFE, ensemble MFE, centroid structure /
    # MFE / distance, frequency, diversity, base-pair probabilities -- TODO
    # confirm FOLDER.fold's actual return signature.
    stc, mfe, emfe, cstc, cmfe, cdst, frq, div, bpp = FOLDER.fold(seq)
    # Graph features
    # Scale the probabilities before using them as edge weights; the reason
    # for the factor 10 is not visible here -- presumably weight scaling.
    bpp = bpp * 10
    # "Soft" graph built from the (scaled) probabilities, upper triangle only.
    gra_s = igraph.Graph.Weighted_Adjacency(bpp.tolist(), mode=igraph.ADJ_UPPER)
    ftrs.append(len(gra_s.articulation_points()))  # Number of articulation points
    ftrs.append(gra_s.average_path_length())  # Average shortest path length
    a = gra_s.betweenness(directed=False)
    #ftrs.append(numpy.mean(a)) # Average vertex betweenness
    # Linear relation with path length
    #ftrs.append(numpy.std(a)) # Std dev. vertex betweenness
    # Linear relation with edge betweeness
    a = gra_s.edge_betweenness(directed=False)
    ftrs.append(numpy.mean(a))  # Average edge betweenness
    ftrs.append(numpy.std(a))  # Std dev. edge betweenness
    #ftrs.append(numpy.mean(gra_s.cocitation())) # Average cocitation distance
def calcFtrs(seq):
    """Compute graph features from the predicted structure of ``seq``.

    NOTE(review): this fragment appears truncated -- no return statement is
    visible, so as shown the function returns None. ``FOLDER`` and
    ``stc2adj`` are defined outside this chunk; here ``FOLDER.fold`` returns
    only (structure, MFE), unlike the other fragments in this file -- these
    are likely different folder back-ends; confirm.
    """
    ftrs = []
    stc, mfe = FOLDER.fold(seq)
    # Graph features
    # Adjacency matrix of the single predicted ("hard") structure.
    adj = stc2adj(stc)
    # Upper triangle of adj is read as the weighted edge list.
    gra_h = igraph.Graph.Weighted_Adjacency(adj.tolist(), mode=igraph.ADJ_UPPER)
    ftrs.append(len(gra_h.articulation_points()))  # Number of articulation points
    ftrs.append(gra_h.average_path_length())  # Average shortest path length
    a = gra_h.betweenness(directed=False)
    #ftrs.append(numpy.mean(a)) # Average vertex betweenness
    # Linear relation with path length
    #ftrs.append(numpy.std(a)) # Std dev. vertex betweenness
    # Linear relation with edge betweeness
    a = gra_h.edge_betweenness(directed=False)
    ftrs.append(numpy.mean(a))  # Average edge betweenness
    ftrs.append(numpy.std(a))  # Std dev. edge betweenness
    #ftrs.append(numpy.mean(gra_h.cocitation())) # Average cocitation distance
# NOTE(review): this fragment starts mid-function (the first line is the
# body of an if-branch whose condition is not visible, followed by its
# `else:`). noise_scale, d, n, W, sem_type and _simulate_single_equation
# come from the enclosing (unseen) scope. It resembles the interior of a
# linear-SEM sampler -- confirm against the original module.
    scale_vec = noise_scale * np.ones(d)
else:
    # noise_scale given per-variable: must provide one scale per node.
    if len(noise_scale) != d:
        raise ValueError('noise scale must be a scalar or has length d')
    scale_vec = noise_scale
if not is_dag(W):
    raise ValueError('W must be a DAG')
if np.isinf(n):  # population risk for linear gauss SEM
    if sem_type == 'gauss':
        # make 1/d X'X = true cov
        X = np.sqrt(d) * np.diag(scale_vec) @ np.linalg.inv(np.eye(d) - W)
        return X
    else:
        raise ValueError('population risk not available')
# empirical risk
# Sample each variable in topological order so every node's parents are
# already filled in when its equation is simulated.
G = ig.Graph.Weighted_Adjacency(W.tolist())
ordered_vertices = G.topological_sorting()
assert len(ordered_vertices) == d
X = np.zeros([n, d])
for j in ordered_vertices:
    parents = G.neighbors(j, mode=ig.IN)
    X[:, j] = _simulate_single_equation(X[:, parents], W[parents, j], scale_vec[j])
return X