Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
A, dace.symbolic.pystr_to_symbolic("N * K"),
dace.properties.SubsetProperty.from_string("0:N, 0:K"), 1))
state.add_memlet_path(
read_A_sdfg_node,
A_pipe_read,
src_conn="pipe",
memlet=dace.memlet.Memlet(
A_pipe_out, dace.symbolic.pystr_to_symbolic("N * K"),
dace.properties.SubsetProperty.from_string("0"), 1))
state.add_memlet_path(
B,
read_B_sdfg_node,
dst_conn="mem",
memlet=dace.memlet.Memlet(
B, dace.symbolic.pystr_to_symbolic("(N / P) * K * M"),
dace.properties.SubsetProperty.from_string("0:K, 0:M"), 1))
state.add_memlet_path(
read_B_sdfg_node,
B_pipe_read,
src_conn="pipe",
memlet=dace.memlet.Memlet(
B_pipe_out, dace.symbolic.pystr_to_symbolic("(N / P) * K * M"),
dace.properties.SubsetProperty.from_string("0"), 1))
state.add_memlet_path(
C_pipe_write,
write_C_sdfg_node,
dst_conn="pipe",
memlet=dace.memlet.Memlet(
C_pipe_out, dace.symbolic.pystr_to_symbolic("N * M"),
dace.properties.SubsetProperty.from_string("P"), 1))
src_ast = inliner.visit(src_ast)
# 2. resolve all the symbols in the AST
allowed_globals = global_vars.copy()
allowed_globals.update(argtypes)
symresolver = SymbolResolver(allowed_globals)
src_ast = symresolver.visit(src_ast)
# 3. Parse the DaCe program to a hierarchical dependency representation
ast_parser = ParseDaCe(src_file, src_line, argtypes, global_vars, modules,
symresolver)
ast_parser.visit(src_ast)
pdp = ast_parser.program
pdp.source = src
pdp.filename = src_file
pdp.param_syms = sorted(symbolic.getsymbols(argtypes.values()).items())
pdp.argtypes = argtypes
return pdp
def sym2cpp(s):
    """ Converts symbolic variable(s) to C++ expression strings.

        :param s: A single symbolic expression or a list of them.
        :return: A C++ string for a single input, or a list of C++
                 strings when a list is given.
    """
    # A list input maps element-wise; anything else is treated as a
    # single symbolic expression.
    if isinstance(s, list):
        return [cppunparse.pyexpr2cpp(symbolic.symstr(elem)) for elem in s]
    return cppunparse.pyexpr2cpp(symbolic.symstr(s))
def interstate_symbols(self):
    """ Returns variables that are assigned/used in the top-level and can
        be shared between states.

        :return: A 2-tuple ``(assigned, used)`` of ordered dictionaries
                 mapping symbol names to ``dt.Scalar`` descriptors.
    """
    assigned = collections.OrderedDict()
    used = collections.OrderedDict()

    # Find symbols in inter-state edges
    for _, _, edge_data in self.edges():
        for var, expr in edge_data.assignments.items():
            assigned[var] = dt.Scalar(symbolic.symtype(expr))
            if isinstance(expr, str):
                expr = sp.sympify(expr)  # Convert string to sympy expr
            if isinstance(expr, sp.Expr):
                for s in dace.symbolic.symbols_in_sympy_expr(expr):
                    used[s] = dt.Scalar(symbolic.symbol(s).dtype)
            elif expr is None or isinstance(expr, int):
                pass  # Nothing to extract, or a constant
            else:
                raise TypeError("Unexpected type: {}".format(type(expr)))
        # Symbols referenced by the edge condition are "used" as well
        for s in edge_data.condition_symbols():
            used[s] = dt.Scalar(symbolic.symbol(s).dtype)

    # Aggregate symbols from the contained states
    for state in self.nodes():
        a, u = state.interstate_symbols()
        assigned.update(a)
        used.update(u)

    # BUG FIX: the computed dictionaries were never returned, so callers
    # received None despite the documented contract above.
    return assigned, used
def update_resolved_symbol(self, sym):
    """ Notifies an array that a symbol has been resolved so that it
        can be resized to its now-concrete shape.

        :param sym: The symbol that was resolved (unused directly; the
                    full descriptor shape is re-evaluated).
    """
    # Re-evaluate every dimension of the descriptor shape now that
    # (at least) one more symbol has a concrete value.
    new_shape = [symbolic.eval(dim, 0) for dim in self.descriptor.shape]
    self.resize(new_shape, refcheck=False)
    # Refresh the set of symbols still unresolved in the shape.
    self._symlist = symbolic.symlist(self.descriptor.shape)
for gname, gval in f_globals.items():
if isinstance(gval, symbolic.symbol):
if gval.name in symbols:
resolve[gname] = gval.get() # Raise exception if undefined
else:
resolve[gname] = None # Mark unrelated symbols for removal
f_globals.update(resolve)
# Remove unrelated symbols from globals
for rk, rv in resolve.items():
if rv is None:
del f_globals[rk]
# Resolve symbols in arguments as well
newargs = tuple(symbolic.eval(a) for a in args)
##################################################################
# Store parameter objects
pdp.arrayobjs = {
k: v
for k, v in zip(pdp.params, newargs) if isinstance(v, numpy.ndarray)
}
# Simulate f
################################
# Obtain function object
gen_module = {}
gen_module.update(f_globals)
exec(codeobj, gen_module)
cfunc = gen_module[fname]
suffix.append('m')
elif c == '*':
suffix.append('t')
elif c == '/':
suffix.append('d')
cloned_name += '_' + ''.join(suffix)
except:
continue
if cloned_name in sdfg.arrays.keys():
cloned_array = sdfg.arrays[cloned_name]
elif array_node.data in cloned_arrays:
cloned_array = cloned_arrays[array_node.data]
else:
full_shape = []
for r in memlet.bounding_box_size():
size = symbolic.overapproximate(r)
try:
full_shape.append(int(size))
except:
full_shape.append(size)
actual_dims = [
idx for idx, r in enumerate(full_shape)
if not (isinstance(r, int) and r == 1)
]
if len(actual_dims) == 0: # abort
actual_dims = [len(full_shape) - 1]
if isinstance(array, dace.data.Scalar):
sdfg.add_array(name=cloned_name,
shape=[1],
dtype=array.dtype,
transient=True,
storage=dace.dtypes.StorageType.GPU_Global)
##################################################################
f_globals = {}
# WORKAROUND: Works around a bug in CPython 2.x where True and
# False are undefined
f_globals['True'] = True
f_globals['False'] = False
######################
# Allow certain namespaces/modules and constants
f_globals.update(pdp.globals)
# Resolve symbols
symbols = {}
symbols.update(symbolic.getsymbols(
args)) # from parameter values (externally defined as "dace.symbol")
symbols.update(param_symbols) # from parameter values (constant inputs)
resolve = {}
for gname, gval in f_globals.items():
if isinstance(gval, symbolic.symbol):
if gval.name in symbols:
resolve[gname] = gval.get() # Raise exception if undefined
else:
resolve[gname] = None # Mark unrelated symbols for removal
f_globals.update(resolve)
# Remove unrelated symbols from globals
for rk, rv in resolve.items():
if rv is None:
if arr.storage in [
dace.dtypes.StorageType.GPU_Global,
dace.dtypes.StorageType.FPGA_Global
]:
raise NotImplementedError('Non-host return values are '
'unsupported')
# Create an array with the properties of the SDFG array
self._return_arrays.append(
np.ndarray([symbolic.evaluate(s, syms) for s in arr.shape],
arr.dtype.type,
buffer=np.ndarray(
[symbolic.evaluate(arr.total_size, syms)],
arr.dtype.type),
strides=[
symbolic.evaluate(s, syms) * arr.dtype.bytes
for s in arr.strides
]))
self._return_kwarrays[arrname] = self._return_arrays[-1]
# Set up return_arrays field
if len(self._return_arrays) == 0:
self._return_arrays = None
elif len(self._return_arrays) == 1:
self._return_arrays = self._return_arrays[0]
else:
self._return_arrays = tuple(self._return_arrays)
return self._return_kwarrays
return None
# If the copy is contiguous, the difference between the first and last
# pointers should be the shape of the copy
first_src_index = src_subset.at([0] * src_subset.dims(), src_strides)
first_dst_index = dst_subset.at([0] * dst_subset.dims(), dst_strides)
last_src_index = src_subset.at([d - 1 for d in src_subset.size()],
src_strides)
last_dst_index = dst_subset.at([d - 1 for d in dst_subset.size()],
dst_strides)
copy_length = functools.reduce(lambda x, y: x * y, copy_shape)
src_copylen = last_src_index - first_src_index + 1
dst_copylen = last_dst_index - first_dst_index + 1
# Make expressions symbolic and simplify
copy_length = symbolic.pystr_to_symbolic(copy_length).simplify()
src_copylen = symbolic.pystr_to_symbolic(src_copylen).simplify()
dst_copylen = symbolic.pystr_to_symbolic(dst_copylen).simplify()
# Detect 1D copies. The first condition is the general one, whereas the
# second one applies when the arrays are completely equivalent in strides
# and shapes to the copy. The second condition is there because sometimes
# the symbolic math engine fails to produce the same expressions for both
# arrays.
if ((src_copylen == copy_length and dst_copylen == copy_length)
or (tuple(src_shape) == tuple(copy_shape)
and tuple(dst_shape) == tuple(copy_shape)
and tuple(src_strides) == tuple(dst_strides))):
# Emit 1D copy of the whole array
copy_shape = [functools.reduce(lambda x, y: x * y, copy_shape)]
return copy_shape, [1], [1]
# 1D strided copy