import igraph
import numpy as np
from collections import Counter

# zero entries: not infected, one entries: infected
infected = np.zeros(len(g1.vs()))
infected[initial_index] = 1

# lambda expression for the coloring of nodes according to infection status
# x = 1 ==> color red
# x = 0 ==> color white
color_infected = lambda x: "rgb(255," + str(int((1 - x) * 255)) + "," + str(int((1 - x) * 255)) + ")"

t_range = range(min(time.keys()), max(time.keys()) + 1)

# Create video frames
i = 0
for t in t_range:
    i += 1
    slice_graph = igraph.Graph(n=len(g1.vs()), directed=False)
    slice_graph.vs["name"] = g1.vs["name"]
    # time is a defaultdict, so missing time steps simply yield an empty edge list
    for e in time[t]:
        slice_graph.add_edge(e[0], e[1])
        # an active edge transmits the infection in both directions
        if infected[map_name_to_id[e[0]]] == 1:
            infected[map_name_to_id[e[1]]] = 1
        if infected[map_name_to_id[e[1]]] == 1:
            infected[map_name_to_id[e[0]]] = 1
    visual_style["vertex_color"] = [color_infected(x) for x in infected]
    igraph.plot(slice_graph, file_prefix + '_frame_' + str(t).zfill(5) + '.png', **visual_style)
    c = Counter(infected)
    if i % 100 == 0:
        Log.add('Step ' + str(i) + ' infected = ' + str(c[1]))
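# A minimal, self-contained sketch of the same spreading rule on a toy
# temporal edge list (the three-node network and its names are illustrative):
import numpy as np
from collections import defaultdict, Counter

time_edges = defaultdict(list)  # time step -> list of (source, target) edges
time_edges[0] = [('a', 'b')]
time_edges[1] = [('b', 'c')]
map_name_to_id = {'a': 0, 'b': 1, 'c': 2}
infected = np.zeros(len(map_name_to_id))
infected[map_name_to_id['a']] = 1  # seed the infection at node 'a'
for t in range(min(time_edges.keys()), max(time_edges.keys()) + 1):
    for u, v in time_edges[t]:
        # an active edge transmits the infection in both directions
        if infected[map_name_to_id[u]] == 1:
            infected[map_name_to_id[v]] = 1
        if infected[map_name_to_id[v]] == 1:
            infected[map_name_to_id[u]] = 1
print(Counter(infected))  # all three nodes end up infected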
import os
from igraph import Graph

def initialize_graph(path):
    if not os.path.exists(path):
        graph = Graph(directed=True)
        graph.vs['name'] = []
        graph.vs['func_name'] = []
        graph.vs['func_path'] = []
        graph.vs['output_index'] = []
        # pickle output must be written in binary mode
        with open(path, 'wb') as f:
            graph.write_pickle(f)
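# Usage sketch: the pickled graph can be read back with igraph's Read_Pickle
# (the file name here is illustrative):
initialize_graph('call_graph.pickle')           # creates the file only if absent
graph = Graph.Read_Pickle('call_graph.pickle')  # loads the pickle in binary mode
print(graph.vcount())                           # 0 vertices in the fresh graph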
def makeiGraph(self):
    if self.fullConnNodes is not None:
        self.fullConnNodes.sort(key=utility.sortByNodeId)
    g = Graph(directed=False)
    g.add_vertices(len(self.fullConnNodes))
    # collect all channel endpoints first, then add them in one call
    es = []
    for ch in self.channels:
        es.append((ch.node1.nodeid, ch.node2.nodeid))
    if es:
        g.add_edges(es)
    self.igraph = g
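# The bulk-edge pattern above is the fast path in python-igraph: collecting
# the tuples first and calling add_edges() once avoids the per-call overhead
# of add_edge() in a loop. A toy sketch (node count and edges are made up):
from igraph import Graph

g = Graph(directed=False)
g.add_vertices(4)
es = [(0, 1), (1, 2), (2, 3)]
if es:
    g.add_edges(es)
print(g.ecount())  # 3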
def leiden(self):
    """Cluster the SNN graph using the Leiden algorithm.

    https://github.com/vtraag/leidenalg

    From Louvain to Leiden: guaranteeing well-connected communities.
    Traag V, Waltman L, van Eck NJ
    https://arxiv.org/abs/1810.08473
    """
    log_debug('Running leiden clustering...')
    res = self.params['leiden_res']
    seed = self.params['seed']
    # construct the graph object
    nn = set(self.snn_graph[self.snn_graph.columns[0]])
    g = ig.Graph()
    g.add_vertices(len(nn))
    g.vs['name'] = list(range(1, len(nn) + 1))
    ll = []
    for i in self.snn_graph.itertuples(index=False):
        ll.append(tuple(i))
    g.add_edges(ll)
    # the partition type is read from params; 'leiden_partition' is an assumed
    # key here (the original compared the whole params dict to a string, which
    # is always False)
    if self.params.get('leiden_partition') == 'ModularityVertexPartition':
        part = leidenalg.ModularityVertexPartition
        kwargs = {}  # ModularityVertexPartition takes no resolution parameter
    else:
        part = leidenalg.RBERVertexPartition
        kwargs = {'resolution_parameter': res}
    cl = leidenalg.find_partition(g,
                                  part,
                                  n_iterations=10,
                                  seed=seed,
                                  **kwargs)
    self.leiden_cl = cl.membership
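# A minimal sketch of the same leidenalg call on a toy graph (the resolution
# and seed values are illustrative):
import igraph as ig
import leidenalg

# two triangles joined by a single bridge edge
g = ig.Graph(edges=[(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5), (2, 3)])
cl = leidenalg.find_partition(g,
                              leidenalg.RBERVertexPartition,
                              n_iterations=10,
                              resolution_parameter=1.0,
                              seed=42)
print(cl.membership)  # one community id per vertex, e.g. [0, 0, 0, 1, 1, 1]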
def generate_interaction_network(self):
    """
    Generate the interaction network from current distances in the friendship network.

    Returns
    -------
    interaction network: ndarray[N, N]
    """
    # Convert the networkx graph to an igraph graph via its edge list
    # (the fastest conversion route); keep only the (source, target) pairs
    edges = [(u, v) for u, v, _ in nx.to_edgelist(self.friendship_network)]
    transformed_network = igraph.Graph(n=len(self.n_individual), edges=edges)
    # Dijkstra is much faster in igraph; collect all pairwise path lengths
    distance_metric_matrix = np.array(transformed_network.shortest_paths(), dtype=float)
    # Interaction probability matrix from Schleussner et al.: exponential
    # decay with distance in the network
    interaction_probability_matrix = (self.p_ai - self.interaction_offset) * \
        np.exp(-(distance_metric_matrix - 1) / 2.)
    # Find the longest finite path
    distmax = distance_metric_matrix[np.isfinite(distance_metric_matrix)].max()
    # Create histogram bins spanning the shortest to the longest path
    histo_bins = np.arange(1, distmax)
    histo_range = [histo_bins.min(), histo_bins.max()]
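# For reference, the same decay formula on a toy distance matrix, followed by
# the histogram call the bins are presumably prepared for (p_ai and the
# offset are made-up values):
import numpy as np

p_ai, interaction_offset = 0.8, 0.1
distance_metric_matrix = np.array([[0., 1., 2., 3.],
                                   [1., 0., 1., 2.],
                                   [2., 1., 0., 1.],
                                   [3., 2., 1., 0.]])
# interaction probability decays exponentially with network distance
prob = (p_ai - interaction_offset) * np.exp(-(distance_metric_matrix - 1) / 2.)
finite = distance_metric_matrix[np.isfinite(distance_metric_matrix)]
histo_bins = np.arange(1, finite.max())
counts, bin_edges = np.histogram(finite, bins=histo_bins)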
def time_slices_to_layers(graphs,
                          interslice_weight=1,
                          slice_attr='slice',
                          vertex_id_attr='id',
                          edge_type_attr='type',
                          weight_attr='weight'):
    """Convert time slices to layer graphs.

    Each graph is considered to represent a time slice. This function simply
    connects all the consecutive slices (i.e. the slice graph) with an
    ``interslice_weight``. The further conversion is then delegated to
    :func:`slices_to_layers`, which also provides further details.

    See Also
    --------
    :func:`find_partition_temporal`
    :func:`slices_to_layers`
    """
    G_slices = _ig.Graph.Tree(len(graphs), 1, mode=_ig.TREE_UNDIRECTED)
    G_slices.es[weight_attr] = interslice_weight
    G_slices.vs[slice_attr] = graphs
    return slices_to_layers(G_slices,
                            slice_attr,
                            vertex_id_attr,
                            edge_type_attr,
                            weight_attr)
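# Usage sketch: the usual entry point for this conversion is
# find_partition_temporal, which delegates to it. Two toy slices; the 'id'
# vertex attribute identifies the same vertex across slices (all values
# illustrative):
import igraph as ig
import leidenalg

g1 = ig.Graph(edges=[(0, 1), (1, 2)])
g2 = ig.Graph(edges=[(0, 1)])
for g in (g1, g2):
    g.vs['id'] = list(range(g.vcount()))
memberships, improvement = leidenalg.find_partition_temporal(
    [g1, g2], leidenalg.ModularityVertexPartition, interslice_weight=1.0)
print(memberships)  # one membership vector per slice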
def create_pe_graph(self, pe_list):
    '''Create a graph for a pipeline.

    Args:
        pe_list (list): A list of pipeline elements that represent a pipeline.

    Returns:
        Graph: A graph of :class:`lost.db.model.PipeElement` objects.
            pe_graph.vs[0] is the source and pe_graph.vs[pe_graph.vcount()-1]
            is the sink.
    '''
    pe_graph = igraph.Graph(directed=True)
    # one vertex per pipeline element, plus a source and a sink vertex
    pe_graph.add_vertices(len(pe_list) + 2)
    new_vs = pe_graph.vs.select(range(1, len(pe_list) + 1))
    new_vs["pe"] = pe_list
    new_vs["visited"] = False
    sink = pe_graph.vs[pe_graph.vcount() - 1]
    sink["visited"] = False
    pe_graph.vs[0]["visited"] = True
    for pe in pe_list:
        v_pe_n = pe_graph.vs.select(pe_eq=pe)[0]
        if pe.state == state.PipeElement.FINISHED:
            v_pe_n["visited"] = True
        # Check if pe should be linked to the source
        target = pe_graph.es.select(_target=v_pe_n.index)
        if len(target) == 0:
            pe_graph.add_edge(0, v_pe_n.index)
        # Link PipeElements
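# The _target selector used above filters edges by their head vertex; a toy
# sketch of the same idiom:
import igraph

g = igraph.Graph(directed=True)
g.add_vertices(3)
g.add_edges([(0, 1), (0, 2), (1, 2)])
incoming = g.es.select(_target=2)  # all edges pointing into vertex 2
print(len(incoming))  # 2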
def _create_graph_from_natural_language(sentence):
    g = Graph(directed=True)
    db = GraphDatabase(g)
    parser = Parser(db)
    parsed_dict = parser.execute(sentence)
    db = parsed_dict['graph']
    n = 50
    # apply each rule file in order, closing the file handles as we go
    rule_files = ['verbs', 'names', 'various', 'prepositions', 'punctuation',
                  'compound1', 'compound2', 'adjectives', 'delete', 'subordinates']
    for rule_file in rule_files:
        with open(os.path.join(_path, '../rules/%s.parvus' % rule_file)) as f:
            db = repeat_db_rules_n_times(db, f.read(), n)
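# repeat_db_rules_n_times itself is not shown here; a toy, self-contained
# stand-in illustrating the pattern (apply a rule set up to n times, stopping
# early at a fixed point). The string rules below are purely hypothetical:
def repeat_rules_n_times(state, rules, n):
    for _ in range(n):
        new_state = state
        for pattern, replacement in rules:
            new_state = new_state.replace(pattern, replacement)
        if new_state == state:
            break  # fixed point reached before the iteration cap
        state = new_state
    return state

print(repeat_rules_n_times('aaab', [('aa', 'a')], 50))  # 'ab'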
def initializeGraphFromObsDb(self, obs_db):
    t0 = time.time()
    # create the graph and label the vertices
    G = igraph.Graph(self.numCams)
    for id, vert in enumerate(G.vs):
        vert["label"] = "cam{0}".format(id)
    # go through all times
    for timestamp in obs_db.getAllViewTimestamps():
        # cameras that have a target view at this timestamp
        cam_ids_at_timestamp = set(obs_db.getCamIdsAtTimestamp(timestamp))
        # go through all camera pairs and check if they share common corners
        possible_edges = itertools.combinations(cam_ids_at_timestamp, 2)
        for edge in possible_edges:
            cam_id_A = edge[0]
            cam_id_B = edge[1]
            # check them against the other cams for common corners (except against itself...)
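# The candidate-edge enumeration above comes straight from itertools; a toy
# sketch (camera ids are made up):
import itertools

cam_ids_at_timestamp = {0, 2, 3}
# every unordered camera pair seen at this timestamp is a candidate edge;
# combinations() avoids self-pairs and duplicates
for cam_id_A, cam_id_B in itertools.combinations(cam_ids_at_timestamp, 2):
    print(cam_id_A, cam_id_B)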