# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
bp = self.random_breakpoint()
if self.debug:
print("--->", offspring, lparent, rparent, bp)
pop[j] = offspring
if bp > 0.0:
tables.edges.add_row(
left=0.0, right=bp, parent=lparent, child=offspring)
if bp < L:
tables.edges.add_row(
left=bp, right=L, parent=rparent, child=offspring)
if self.debug:
print("Done! Final pop:")
print(pop)
flags = tables.nodes.flags
flags[pop] = tskit.NODE_IS_SAMPLE
tables.nodes.set_columns(
flags=flags,
time=tables.nodes.time,
population=tables.nodes.population)
return tables
def test_log_likelihoods(self):
    # Build a small ancestry table collection by hand: three sample
    # leaves at time 0, two recombination-event nodes at time 0.1, and
    # one ordinary internal node at time 0.25.  Edges are added in the
    # same order as the original so row order is preserved exactly.
    tables = tskit.TableCollection(sequence_length=1)
    for _ in range(3):
        tables.nodes.add_row(
            flags=tskit.NODE_IS_SAMPLE, population=0, individual=-1,
            time=0)
    # Node 2 inherits from node 3 on [0, 0.5) and from node 4 on [0.5, 1).
    tables.edges.add_row(left=0, right=0.5, parent=3, child=2)
    tables.edges.add_row(left=0.5, right=1, parent=4, child=2)
    for _ in range(2):
        tables.nodes.add_row(
            flags=msprime.NODE_IS_RE_EVENT, population=0, individual=-1,
            time=0.1)
    tables.edges.add_row(left=0, right=1, parent=5, child=1)
    tables.edges.add_row(left=0, right=0.5, parent=5, child=3)
    tables.nodes.add_row(flags=0, population=0, individual=-1, time=0.25)
def test_with_mutations(self):
    """
    Run a Wright-Fisher simulation, overlay Jukes-Cantor mutations, then
    simplify down to the sample nodes and check that the result is a
    valid, non-empty tree sequence with the expected number of samples.
    """
    N = 10
    ngens = 100
    tables = wf_sim(N=N, ngens=ngens, deep_history=False, seed=self.random_seed)
    tables.sort()
    ts = tables.tree_sequence()
    # Throw mutations onto the simulated topology.
    ts = tsutil.jukes_cantor(ts, 10, 0.1, seed=self.random_seed)
    tables = ts.tables
    self.assertGreater(tables.sites.num_rows, 0)
    self.assertGreater(tables.mutations.num_rows, 0)
    samples = np.where(
        tables.nodes.flags == tskit.NODE_IS_SAMPLE)[0].astype(np.int32)
    tables.sort()
    tables.simplify(samples)
    # Simplification must preserve a non-empty topology and the variation.
    # (The original repeated the nodes/edges assertions twice back-to-back;
    # the duplicate copy-pasted pair has been removed.)
    self.assertGreater(tables.nodes.num_rows, 0)
    self.assertGreater(tables.edges.num_rows, 0)
    self.assertGreater(tables.sites.num_rows, 0)
    self.assertGreater(tables.mutations.num_rows, 0)
    ts = tables.tree_sequence()
    self.assertEqual(ts.sample_size, N)
    # Every haplotype covers every site exactly once.
    for hap in ts.haplotypes():
        self.assertEqual(len(hap), ts.num_sites)
def record_node(self, input_id, is_sample=False):
    """
    Adds a new node to the output table corresponding to the specified input
    node ID.
    """
    node = self.ts.node(input_id)
    # Copy the input flags with the sample bit cleared, then set that bit
    # again only when the caller wants the output node to be a sample.
    new_flags = node.flags & ~tskit.NODE_IS_SAMPLE
    if is_sample:
        new_flags |= tskit.NODE_IS_SAMPLE
    new_id = self.tables.nodes.add_row(
        flags=new_flags,
        time=node.time,
        population=node.population,
        metadata=node.metadata,
        individual=node.individual,
    )
    # Remember the input -> output mapping for later edge translation.
    self.node_id_map[input_id] = new_id
    return new_id
def test_log_likelihoods(self):
    # Hand-built ARG: three sample leaves, two recombination-event nodes
    # (time 0.1) and two further internal nodes (times 0.25 and 0.5).
    # Rows are appended in the original order so node/edge IDs match.
    tables = tskit.TableCollection(sequence_length=1)
    for _ in range(3):
        tables.nodes.add_row(
            flags=tskit.NODE_IS_SAMPLE, population=0, individual=-1,
            time=0)
    # Node 2 inherits from node 3 on [0, 0.5) and from node 4 on [0.5, 1).
    tables.edges.add_row(left=0, right=0.5, parent=3, child=2)
    tables.edges.add_row(left=0.5, right=1, parent=4, child=2)
    for _ in range(2):
        tables.nodes.add_row(
            flags=msprime.NODE_IS_RE_EVENT, population=0, individual=-1,
            time=0.1)
    tables.edges.add_row(left=0, right=1, parent=5, child=1)
    tables.edges.add_row(left=0, right=0.5, parent=5, child=3)
    tables.nodes.add_row(flags=0, population=0, individual=-1, time=0.25)
    tables.edges.add_row(left=0, right=1, parent=6, child=0)
    tables.edges.add_row(left=0.5, right=1, parent=6, child=4)
    tables.nodes.add_row(flags=0, population=0, individual=-1, time=0.5)
init_ts = msprime.simulate(
self.N, recombination_rate=1.0, length=L, random_seed=self.seed)
init_tables = init_ts.dump_tables()
flags = init_tables.nodes.flags
if not self.initial_generation_samples:
flags = np.zeros_like(init_tables.nodes.flags)
tables.nodes.set_columns(
time=init_tables.nodes.time + ngens,
flags=flags)
tables.edges.set_columns(
left=init_tables.edges.left, right=init_tables.edges.right,
parent=init_tables.edges.parent, child=init_tables.edges.child)
else:
flags = 0
if self.initial_generation_samples:
flags = tskit.NODE_IS_SAMPLE
for _ in range(self.N):
tables.nodes.add_row(flags=flags, time=ngens, population=0)
pop = list(range(self.N))
for t in range(ngens - 1, -1, -1):
if self.debug:
print("t:", t)
print("pop:", pop)
dead = [self.rng.random() > self.survival for k in pop]
# sample these first so that all parents are from the previous gen
new_parents = [
(self.rng.choice(pop), self.rng.choice(pop)) for k in range(sum(dead))]
k = 0
if self.debug:
print("Replacing", sum(dead), "individuals.")
left[2 * index + 1] = trees_group["left"]
right = np.zeros(2 * num_rows, dtype=np.float64)
right[2 * index] = trees_group["right"]
right[2 * index + 1] = trees_group["right"]
child = np.array(trees_group["children"], dtype=np.int32).flatten()
tables = tskit.TableCollection(np.max(right))
tables.edges.set_columns(left=left, right=right, parent=parent, child=child)
cr_node = np.array(trees_group["node"], dtype=np.int32)
num_nodes = max(np.max(child), np.max(cr_node)) + 1
sample_size = np.min(cr_node)
flags = np.zeros(num_nodes, dtype=np.uint32)
population = np.zeros(num_nodes, dtype=np.int32)
time = np.zeros(num_nodes, dtype=np.float64)
flags[:sample_size] = tskit.NODE_IS_SAMPLE
cr_population = np.array(trees_group["population"], dtype=np.int32)
cr_time = np.array(trees_group["time"])
time[cr_node] = cr_time
population[cr_node] = cr_population
if "samples" in root:
samples_group = root["samples"]
population[:sample_size] = samples_group["population"]
if "time" in samples_group:
time[:sample_size] = samples_group["time"]
tables.nodes.set_columns(flags=flags, population=population, time=time)
_set_populations(tables)
if "mutations" in root:
mutations_group = root["mutations"]
_convert_hdf5_mutations(
mutations_group, tables.sites, tables.mutations, remove_duplicate_positions)
def add_node(self, time, flags=tskit.NODE_IS_SAMPLE):
    # Add a node, marking by default as a sample (as required in an ancestors_ts)
    # and return its ID (nodes are numbered consecutively from zero).
    new_id = self.num_nodes
    self.num_nodes = new_id + 1
    self.time.append(time)
    self.flags.append(flags)
    # No copy path has been recorded for this node yet.
    self.path.append(None)
    return new_id
def _mark_first_generation(self):
    '''
    Mark all 'first generation' individuals' nodes as samples, and return
    the corresponding tree sequence.
    '''
    tables = self.dump_tables()
    # A node's individual is -1 when it belongs to no individual; valid
    # individual IDs start at 0.  Use >= 0 here: the original `> 0`
    # wrongly excluded nodes attached to individual 0 (and -1 would index
    # the last row of individuals.flags, masked out by the first clause).
    first_gen_nodes = ((tables.nodes.individual >= 0)
                       & ((tables.individuals.flags[tables.nodes.individual]
                           & INDIVIDUAL_FIRST_GEN) > 0))
    if sum(first_gen_nodes) == 0:
        # Fixed the doubled space the old "; " + " did" concatenation made.
        warnings.warn("Tree sequence does not have the initial generation; "
                      "did you simplify it after output from SLiM?")
    flags = tables.nodes.flags
    flags[first_gen_nodes] = (flags[first_gen_nodes] | tskit.NODE_IS_SAMPLE)
    # set_columns requires every column to be passed back together.
    tables.nodes.set_columns(flags=flags, population=tables.nodes.population,
                             individual=tables.nodes.individual,
                             time=tables.nodes.time,
                             metadata=tables.nodes.metadata,
                             metadata_offset=tables.nodes.metadata_offset)
    ts = load_tables(tables)
    ts.reference_sequence = self.reference_sequence
    return ts
individual=tables.nodes.individual,
metadata=tables.nodes.metadata,
metadata_offset=tables.nodes.metadata_offset,
)
# Now simplify down the tables to get rid of all sample edges.
node_id_map = tables.simplify(
samples, filter_sites=False, filter_individuals=True, filter_populations=False
)
# We cannot have flags that are both samples and have other flags set,
# so we need to unset all the sample flags for these.
flags = np.zeros_like(tables.nodes.flags)
index = tables.nodes.flags == tskit.NODE_IS_SAMPLE
flags[index] = tskit.NODE_IS_SAMPLE
index = tables.nodes.flags != tskit.NODE_IS_SAMPLE
flags[index] = np.bitwise_and(tables.nodes.flags[index], ~tskit.NODE_IS_SAMPLE)
tables.nodes.set_columns(
flags=flags,
time=tables.nodes.time,
population=tables.nodes.population,
individual=tables.nodes.individual,
metadata=tables.nodes.metadata,
metadata_offset=tables.nodes.metadata_offset,
)
record = provenance.get_provenance_dict(command="extract_ancestors")
tables.provenances.add_row(record=json.dumps(record))
return tables, node_id_map