def test_from_igraph_invalid_attribute():
    with assert_warns_message(
        UserWarning, "Edge attribute invalid not found. Returning unweighted graph"
    ):
        n = 100
        m = 500
        K = np.zeros((n, n))
        for _ in range(m):
            e = np.random.choice(n, 2, replace=False)
            K[e[0], e[1]] = K[e[1], e[0]] = 1
        g = igraph.Graph.Adjacency(K.tolist())
        G = graphtools.from_igraph(g, attribute="invalid")
def test_from_igraph_invalid_precomputed():
    with assert_warns_message(
        UserWarning,
        "Cannot build graph from igraph with precomputed=affinity. Use 'adjacency' instead.",
    ):
        n = 100
        m = 500
        K = np.zeros((n, n))
        for _ in range(m):
            e = np.random.choice(n, 2, replace=False)
            K[e[0], e[1]] = K[e[1], e[0]] = 1
        g = igraph.Graph.Adjacency(K.tolist())
        G = graphtools.from_igraph(g, attribute=None, precomputed="affinity")
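# For contrast with the two failing cases above, a minimal sketch (assumption)
# of a call that should not warn: treat the igraph graph as a precomputed
# adjacency matrix, as the warning message itself suggests.
import numpy as np
import igraph
import graphtools

n, m = 100, 500
K = np.zeros((n, n))
for _ in range(m):
    e = np.random.choice(n, 2, replace=False)
    K[e[0], e[1]] = K[e[1], e[0]] = 1
g = igraph.Graph.Adjacency(K.tolist())
G = graphtools.from_igraph(g, attribute=None, precomputed="adjacency")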
verbose : {False, True}, optional
    If set to True, prints a progress indicator to the screen every 100
    levels.

Returns
-------
T : LevelSetTree
    See the LevelSetTree class for attributes and method definitions.
"""
n = len(W)
_levels = [float(x) for x in levels]

## Initialize the graph and cluster tree
G = igr.Graph.Adjacency(W.tolist(), mode=igr.ADJ_MAX)
G.vs['index'] = range(n)
T = lst.LevelSetTree(bg_sets, _levels)

cc0 = G.components()
for i, c in enumerate(cc0):
    T.subgraphs[i] = G.subgraph(c)
    T.nodes[i] = lst.ConnectedComponent(
        i, parent=None, children=[], start_level=0., end_level=None,
        start_mass=0.0, end_mass=None, members=G.vs[c]['index'])

# Loop through the removal grid
for i, (level, bg) in enumerate(zip(_levels, bg_sets)):
    if verbose and i % 100 == 0:
        print("iteration", i)  # progress indicator described in the docstring
def plot(self, margin=50):  # plotting function for the classification tree
    A = self.get_adjacency_matrix_as_list()
    g = ig.Graph.Adjacency(A)
    for index, vertex in enumerate(self.vertices):
        if vertex.pivot is not None:
            # Internal node: label with the split attribute and pivot
            if isinstance(vertex.pivot, set):
                label_pivot = ' in ' + str(list(vertex.pivot))
            else:
                label_pivot = ' less than ' + str(vertex.pivot)
            g.vs[index]['label'] = str(vertex.split_attribute) + label_pivot
            g.vs[index]['label_dist'] = 2
            g.vs[index]['label_color'] = 'red'
            g.vs[index]['color'] = 'red'
        else:
            # Leaf node: label with the predicted class
            g.vs[index]['label'] = str(vertex.predicted_class)
            g.vs[index]['color'] = 'blue'
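# Usage sketch (assumption, not part of the method above): a tree built this
# way is typically rendered with igraph's Reingold-Tilford layout; `margin`
# is a standard igraph plotting keyword. Requires a cairo plotting backend.
import igraph as ig
tree = ig.Graph.Adjacency([[0, 1, 1], [0, 0, 0], [0, 0, 0]])
tree.vs['label'] = ['root', 'left', 'right']
ig.plot(tree, layout=tree.layout_reingold_tilford(root=[0]), margin=50)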
if graph_type == 'ER':
    # Erdos-Renyi
    G_und = ig.Graph.Erdos_Renyi(n=d, m=s0)
    B_und = _graph_to_adjmat(G_und)
    B = _random_acyclic_orientation(B_und)
elif graph_type == 'SF':
    # Scale-free, Barabasi-Albert
    G = ig.Graph.Barabasi(n=d, m=int(round(s0 / d)), directed=True)
    B = _graph_to_adjmat(G)
elif graph_type == 'BP':
    # Bipartite, Sec 4.1 of (Gu, Fu, Zhou, 2018)
    top = int(0.2 * d)
    G = ig.Graph.Random_Bipartite(top, d - top, m=s0, directed=True, neimode=ig.OUT)
    B = _graph_to_adjmat(G)
else:
    raise ValueError('unknown graph type')
B_perm = _random_permutation(B)
assert ig.Graph.Adjacency(B_perm.tolist()).is_dag()
return B_perm
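# The helpers used above are not defined in this excerpt. A minimal sketch of
# plausible implementations on dense numpy arrays (assumption, not the
# original code):
import numpy as np

def _graph_to_adjmat(G):
    # igraph graph -> dense 0/1 adjacency matrix
    return np.array(G.get_adjacency().data)

def _random_permutation(M):
    # apply the same random permutation to rows and columns
    P = np.random.permutation(np.eye(M.shape[0]))
    return P.T @ M @ P

def _random_acyclic_orientation(B_und):
    # relabel nodes at random, then keep only the strictly lower triangle,
    # which orients every edge from higher to lower index (hence acyclic)
    return np.tril(_random_permutation(B_und), k=-1)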
def plot(self, margin=50):  # plotting function for the regression tree
    A = self.get_adjacency_matrix_as_list()
    g = ig.Graph.Adjacency(A)
    for index, vertex in enumerate(self.vertices):
        if vertex.pivot is not None:
            # Internal node: label with the split attribute and pivot
            if isinstance(vertex.pivot, set):
                label_pivot = ' in ' + str(list(vertex.pivot))
            else:
                label_pivot = ' less than ' + str(vertex.pivot)
            g.vs[index]['label'] = str(vertex.split_attribute) + label_pivot
            g.vs[index]['label_dist'] = 2
            g.vs[index]['label_color'] = 'red'
            g.vs[index]['color'] = 'red'
        else:
            # Leaf node (assumed to mirror the classification-tree version above)
            g.vs[index]['label'] = str(vertex.predicted_class)
            g.vs[index]['color'] = 'blue'
kwCounts = Counter(k for kwList in keywords for k in kwList)
kwHist = dict(item for item in kwCounts.most_common() if item[1] > 1)
print("%d keywords that occur 2 or more times - used as features" % len(kwHist))
#build features
features = buildFeatures(df, kwHist, idf)
print("Compute, and Threshold similarity")
# compute similarity
sim = simCosine(features)
# threshold
sim = threshold(sim)
# build and partition network
print("Build and partition network")
nw = ig.Graph.Adjacency(sim.tolist(), mode="UNDIRECTED")
nw.vs['name'] = range(sim.shape[0])
partL = nw.community_multilevel(return_levels=False)
# extract clusters and add cluster id and name to each node
clL = partL.subgraphs()
clL.sort(key=lambda x:x.vcount(), reverse=True)
cls = dict([(v['name'],i) for i in range(len(clL)) if clL[i].vcount() > 1 for v in clL[i].vs])
df['id'] = range(nDoc)
df['clusId'] = df['id'].apply(lambda idx: str(cls[idx]) if idx in cls else None)
buildClusterNames(df, kwHist)
# layout the graph
print("Running layout algo")
df = df.set_index('id', drop=False)
layout = nw.layout_fruchterman_reingold()
coords = np.array(layout.coords)
elif sem_type == 'gp':
    # single Gaussian process over all parents
    from sklearn.gaussian_process import GaussianProcessRegressor
    gp = GaussianProcessRegressor()
    x = gp.sample_y(X, random_state=None).flatten() + z
elif sem_type == 'gp-add':
    # additive model: one Gaussian process per parent, summed
    from sklearn.gaussian_process import GaussianProcessRegressor
    gp = GaussianProcessRegressor()
    x = sum([gp.sample_y(X[:, i, None], random_state=None).flatten()
             for i in range(X.shape[1])]) + z
else:
    raise ValueError('unknown sem type')
return x
d = B.shape[0]
scale_vec = noise_scale if noise_scale else np.ones(d)
X = np.zeros([n, d])
G = ig.Graph.Adjacency(B.tolist())
ordered_vertices = G.topological_sorting()
assert len(ordered_vertices) == d
for j in ordered_vertices:
    parents = G.neighbors(j, mode=ig.IN)
    X[:, j] = _simulate_single_equation(X[:, parents], scale_vec[j])
return X
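# Minimal self-contained sketch of the pattern above: build an igraph DAG from
# a binary adjacency matrix and fill the columns of X in topological order.
# The toy structural equation below is an assumption for illustration only.
import numpy as np
import igraph as ig

B = np.array([[0, 1, 1],
              [0, 0, 1],
              [0, 0, 0]])  # strictly upper-triangular, hence acyclic
G = ig.Graph.Adjacency(B.tolist())
assert G.is_dag()

n = 5
X = np.zeros([n, B.shape[0]])
for j in G.topological_sorting():
    parents = G.neighbors(j, mode=ig.IN)
    # toy equation: sum of parent columns plus Gaussian noise
    X[:, j] = X[:, parents].sum(axis=1) + np.random.normal(size=n)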