from devito.tools import flatten

def set_dle_mode(mode):
    # Normalize the user-supplied DLE `mode` into a `(mode, options)` pair
    if not mode:
        return mode, {}
    elif isinstance(mode, str):
        return mode, {}
    elif isinstance(mode, tuple):
        if len(mode) == 0:
            return 'noop', {}
        elif isinstance(mode[-1], dict):
            if len(mode) == 2:
                return mode
            else:
                return tuple(flatten(i.split(',') for i in mode[:-1])), mode[-1]
        else:
            return tuple(flatten(i.split(',') for i in mode)), {}
    raise TypeError("Illegal DLE mode %s." % str(mode))
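A quick sanity check of the branches above; these calls are illustrative and simply assume set_dle_mode and flatten are in scope:

assert set_dle_mode(None) == (None, {})
assert set_dle_mode('advanced') == ('advanced', {})
assert set_dle_mode(('blocking,openmp',)) == (('blocking', 'openmp'), {})
assert set_dle_mode(('blocking', {'blockinner': True})) == \
    ('blocking', {'blockinner': True})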
def visit_Iteration(self, o):
    # Gather the symbols contributed by every child, add those selected by
    # this visitor's `rule`, then deduplicate and sort by name
    # (`filter_sorted` is from devito.tools, `attrgetter` from operator)
    symbols = flatten([self._visit(i) for i in o.children])
    symbols += self.rule(o)
    return filter_sorted(symbols, key=attrgetter('name'))
def visit_Block(self, o):
    # Flatten the lowered children into one statement list, then wrap it
    # in a cgen Block between the node's header and footer
    body = flatten(self._visit(i) for i in o.children)
    return c.Module(o.header + (c.Block(body),) + o.footer)
    'scope': lambda match, o: match in flatten(o.children)
}
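All three visitor snippets share one pattern: visit every child, flatten the per-child result lists into a single list, then post-process. A self-contained sketch of that pattern, with a toy Node class and a one-level flatten standing in for Devito's IR nodes and devito.tools.flatten:

from itertools import chain

def flatten(iterable):
    # One-level flatten, enough for lists-of-lists
    # (stand-in for devito.tools.flatten)
    return list(chain.from_iterable(iterable))

class Node:
    def __init__(self, name, *children):
        self.name, self.children = name, children

def visit(o):
    # Each node contributes its own name plus its descendants' names
    return [o.name] + flatten(visit(i) for i in o.children)

tree = Node('root', Node('a', Node('b')), Node('c'))
print(visit(tree))  # ['root', 'a', 'b', 'c']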
@property
def _dist_scatter_mask(self):
    """
    A mask to index into ``self.data``, which creates a new data array that
    logically contains N consecutive groups of sparse data values, where N
    is the number of MPI ranks. The i-th group contains the sparse data
    values accessible by the i-th MPI rank. Thus, sparse data values along
    the boundary of two or more MPI ranks are duplicated.
    """
    dmap = self._dist_datamap
    mask = np.array(flatten(dmap[i] for i in sorted(dmap)), dtype=int)
    ret = [slice(None) for i in range(self.ndim)]
    ret[self._sparse_position] = mask
    return ret
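A minimal, self-contained sketch of the mask construction; the datamap, the data shape and the sparse position below are made up for illustration:

import numpy as np
from itertools import chain

# Hypothetical datamap: MPI rank -> indices of the sparse points it can
# access. Point 2 sits on a rank boundary, so it appears for both ranks.
dmap = {1: [2, 3, 4], 0: [0, 1, 2]}
data = np.arange(10.0).reshape(5, 2)  # 5 sparse points, 2 values each

mask = np.array(list(chain.from_iterable(dmap[i] for i in sorted(dmap))),
                dtype=int)
ret = [slice(None)] * data.ndim
ret[0] = mask  # assume axis 0 is the sparse position
print(data[tuple(ret)])  # rows 0, 1, 2, then 2, 3, 4: row 2 is duplicated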
rule = lambda i: i.is_Scalar or i.is_Tensor

# Don't forget this nasty case, with indirections on the LHS:
# >>> u[t, a[x]] = f[x] -> (reads={a, f}, writes={u})
roots = []
for i in exprs:
    try:
        roots.append(i.rhs)
        roots.extend(list(i.lhs.indices))
    except AttributeError:
        # E.g., FunctionFromPointer
        roots.append(i)

reads = []
terminals = flatten(retrieve_terminals(i, deep=True) for i in roots)
for i in terminals:
    candidates = i.free_symbols
    try:
        candidates.update({i.function})
    except AttributeError:
        pass
    for j in candidates:
        try:
            if rule(j):
                reads.append(j)
        except AttributeError:
            pass
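The reads pass is deliberately duck-typed: free_symbols exists on any SymPy expression, while .function and the is_Scalar/is_Tensor flags only exist on Devito objects, hence the try/except guards. A rough plain-SymPy illustration of the indirection case from the comment above, with an arbitrary stand-in rule:

import sympy

x, t = sympy.symbols('x t')
a, f, u = [sympy.IndexedBase(n) for n in 'afu']

# u[t, a[x]] = f[x]: the read roots are the RHS plus the LHS indices
lhs, rhs = u[t, a[x]], f[x]
roots = [rhs] + list(lhs.indices)

rule = lambda s: isinstance(s, sympy.IndexedBase)  # stand-in for the rule above
reads = {s for r in roots for s in r.free_symbols if rule(s)}
print(reads)  # {a, f} (u is only written, so it never shows up)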
writes = []
for i in exprs:
    try:
def handle_indexed(indexed):
    relation = []
    for i in indexed.indices:
        try:
            maybe_dim = split_affine(i).var
            if isinstance(maybe_dim, Dimension):
                relation.append(maybe_dim)
        except ValueError:
            # Maybe there are some nested Indexeds (e.g., the situation is A[B[i]])
            nested = flatten(handle_indexed(n) for n in retrieve_indexed(i))
            if nested:
                relation.extend(nested)
            else:
                # Fallback: just insert all the Dimensions we find, regardless
                # of what the user is attempting to do
                relation.extend([d for d in filter_sorted(i.free_symbols)
                                 if isinstance(d, Dimension)])
    return tuple(relation)
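To see the nested-Indexed branch in action: for A[B[i]], split_affine rejects the non-affine index B[i], and the recursion recovers the Dimension i from the inner access. Below is a self-contained toy version, with deliberately simplified stand-ins for Dimension, split_affine and retrieve_indexed (the real Devito versions are richer):

import sympy
from collections import namedtuple

class Dimension(sympy.Symbol):
    pass  # stand-in for devito's Dimension

AffineFunction = namedtuple('AffineFunction', 'var')

def split_affine(expr):
    # Toy split_affine: accept a bare symbol, reject anything else
    # (the real one handles affine expressions such as a*x + b)
    if expr.is_Symbol:
        return AffineFunction(expr)
    raise ValueError

def retrieve_indexed(expr):
    return list(expr.atoms(sympy.Indexed))  # toy retrieve_indexed

def relation(indexed):
    rel = []
    for idx in indexed.indices:
        try:
            var = split_affine(idx).var
            if isinstance(var, Dimension):
                rel.append(var)
        except ValueError:
            # Nested access, e.g. A[B[i]]: recurse into the inner Indexed
            for n in retrieve_indexed(idx):
                rel.extend(relation(n))
    return tuple(rel)

i = Dimension('i')
A, B = sympy.IndexedBase('A'), sympy.IndexedBase('B')
print(relation(A[B[i]]))  # (i,)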
header.extend(omp_parallel + [cgen.Block(extra)])

# Statements to be inserted into the time loop before the spatial loop:
# apply the time substitutions first, then lower the result to cgen
pre_stencils = [self.time_substitutions(x)
                for x in self.time_loop_stencils_b]
pre_stencils = [self.convert_equality_to_cgen(x)
                for x in pre_stencils]

# Statements to be inserted into the time loop after the spatial loop
post_stencils = [self.time_substitutions(x)
                 for x in self.time_loop_stencils_a]
post_stencils = [self.convert_equality_to_cgen(x)
                 for x in post_stencils]

if self.profile:
    pre_stencils = list(flatten([self.profiler.add_profiling([s], "%s%d" %
                                 (PRE_STENCILS.name, i))
                                 for i, s in enumerate(pre_stencils)]))
    post_stencils = list(flatten([self.profiler.add_profiling([s], "%s%d" %
                                  (POST_STENCILS.name, i))
                                  for i, s in enumerate(post_stencils)]))

initial_block = time_stepping + pre_stencils
if initial_block:
    initial_block = omp_single + [cgen.Block(initial_block)]

end_block = post_stencils
if end_block:
    end_block = omp_single + [cgen.Block(end_block)]
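For reference, the omp_single + [cgen.Block(...)] pattern renders as an OpenMP pragma followed by a braced statement list; a minimal sketch with the cgen package (the statements themselves are made up):

import cgen

omp_single = [cgen.Pragma('omp single')]
body = [cgen.Statement('a[i] = b[i]'), cgen.Statement('t += dt')]

initial_block = omp_single + [cgen.Block(body)]
print('\n'.join(str(s) for s in initial_block))
# #pragma omp single
# {
#   a[i] = b[i];
#   t += dt;
# }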