for o in outputs:
    n = self.get_node_by_output_in_current_graph(o)
    utils.make_sure(n is None, "output tensor named %s already exists in node: \n%s", o, n)

onnx_node = helper.make_node(op_type, inputs, outputs, name=name, domain=domain, **raw_attr)

if op_type in ["If", "Loop", "Scan"]:
    # force ops that contain inner graphs not to be skipped during conversion.
    skip_conversion = False

node = Node(onnx_node, self, skip_conversion=skip_conversion)
if onnx_attrs:
    _ = [node.set_attr_onnx(a) for a in onnx_attrs]

if shapes:
    utils.make_sure(len(shapes) == output_count,
                    "output shape count %s not equal to output count %s", len(shapes), output_count)
    for i in range(output_count):
        self.set_shape(node.output[i], shapes[i])

if dtypes:
    utils.make_sure(len(dtypes) == output_count,
                    "output dtypes count %s not equal to output count %s", len(dtypes), output_count)
    for i in range(output_count):
        self.set_dtype(node.output[i], dtypes[i])

if (not shapes or not dtypes) and infer_shape_dtype:
    self.update_node_shape_dtype(node, override=False)

logger.debug("Made node: %s\n%s", node.name, node.summary)
self._nodes.append(node)
return node
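For orientation, here is a minimal usage sketch of the method above as it is called from the conversion handlers further down; "X:0" and the attribute values are placeholders, not taken from the original code.

# hypothetical usage of Graph.make_node; ctx is assumed to be a Graph instance
trans = ctx.make_node("Transpose", ["X:0"], attr={"perm": [0, 3, 1, 2]})
pad = ctx.make_node("Pad", trans.output, attr={"pads": [0, 0, 1, 1, 0, 0, 1, 1]})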
new_begin = []
new_end = []
axes = []
# the onnx Slice op can't remove an axis; track such axes and add a Squeeze op if needed
needs_squeeze = []
# ellipsis: at most one bit can be 1. An ellipsis implicitly creates as many range specifications as
# necessary to fully specify the sliced range for every dimension.
# For example, for a 4-dimensional tensor foo the slice foo[2, ..., 5:8] implies foo[2, :, :, 5:8]
# NOTE: axes covered by the ellipsis are skipped via the `axes` attribute
ellipsis_gap = 0
for idx, begin_item in enumerate(begin):
    if strides[idx] != 1:
        raise ValueError("StridedSlice: only strides=1 is supported")
    if (ellipsis_mask >> idx) & 1:
        input_shape = ctx.get_shape(node.input[0])
        utils.make_sure(
            input_shape is not None,
            "StridedSlice op {} requires the shape of input".format(node.name)
        )
        ellipsis_gap = len(input_shape) - len(begin)
        continue

    # ignore ellipsis axes
    axes.append(idx + ellipsis_gap)
    end_item = end[idx]

    # an implicit condition is stride == 1 (checked above)
    if begin_item < 0 and end_item == 0:
        end_item = max_size

    mask = (shrink_axis_mask >> idx) & 1
    if mask != 0:
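As a NumPy illustration (not from the converter) of the shrink_axis bookkeeping above: an axis selected with a plain integer index is equivalent to taking a size-1 slice on that axis and then squeezing it, which is the Slice + Squeeze pair that needs_squeeze records.

import numpy as np

foo = np.arange(2 * 3 * 4).reshape(2, 3, 4)
shrunk = foo[1, :, 0:2]                                      # integer index shrinks axis 0
sliced_then_squeezed = np.squeeze(foo[1:2, :, 0:2], axis=0)  # size-1 slice, then squeeze
assert np.array_equal(shrunk, sliced_then_squeezed)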
def version_1(cls, ctx, node, **kwargs):
    # https://www.tensorflow.org/api_docs/python/tf/space_to_batch_nd
    # per the above link, the data format of the input tensor should be (batch, spatial_shape, remaining_shape)
    # and we only support 4D here, so the data format is NHWC
    # the onnx op "SpaceToDepth" does the same work on the input tensor except that it works on "C",
    # and it only supports NCHW
    # T out = SpaceToBatchND(T input, int32 block_shape, int32 paddings)
    input_tensor = node.inputs[0]
    blocksize = node.inputs[1].get_tensor_value()

    utils.make_sure(len(ctx.get_shape(input_tensor.output[0])) == 4, "only supports 4D for now")
    utils.make_sure(len(blocksize) == 2 and blocksize[0] == blocksize[1],
                    "only support same blocksize at different dims")

    shapes = [ctx.get_shape(node.output[0])]
    dtypes = [ctx.get_dtype(node.output[0])]

    # implement the pads logic; the data format is NHWC
    paddings = node.inputs[2].get_tensor_value()
    top, bottom = paddings[0]
    left, right = paddings[1]
    pads = [0, top, left, 0,
            0, bottom, right, 0]
    ctx.remove_node(node.name)
    if ctx.opset <= 10:
        pad_op = ctx.make_node("Pad", input_tensor.output, attr={"pads": pads})
    else:
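A small sketch (illustrative values only) of how the TF paddings tensor above maps onto the flat begin/end layout expected by the ONNX Pad op: all "begin" amounts come first, then all "end" amounts, one entry per axis of the NHWC input.

paddings = [[1, 2], [3, 4]]          # hypothetical [[top, bottom], [left, right]]
top, bottom = paddings[0]
left, right = paddings[1]
pads = [0, top, left, 0,             # begins for N, H, W, C
        0, bottom, right, 0]         # ends   for N, H, W, C
assert pads == [0, 1, 3, 0, 0, 2, 4, 0]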
def version_7(cls, ctx, node, **kwargs):
    # T output = Fill(int32 dims, T value, @int32 index_type)
    # T outputs = Tile(T value, int64 repeats (e.g. dims))
    fill_shape = ctx.get_shape(node.input[0])
    utils.make_sure(fill_shape is not None, "shape of {} is None".format(node.input[0]))
    fill_shape_dims = fill_shape[0]
    utils.make_sure(fill_shape_dims > 0, "opset 7 requires fill shape length > 0, or please try opset > 7")
    val_dtype = ctx.get_dtype(node.input[1])
    val_shape = ctx.get_shape(node.input[1])

    need_cast = val_dtype != onnx_pb.TensorProto.FLOAT and ctx.opset < 9
    new_dtype = val_dtype
    if need_cast:
        new_dtype = onnx_pb.TensorProto.FLOAT
        attr = {"to": new_dtype}
        cast_to_float = ctx.insert_new_node_on_input(node, "Cast", node.input[1], name=None, **attr)
        ctx.set_dtype(cast_to_float.output[0], new_dtype)
        ctx.set_shape(cast_to_float.output[0], val_shape)

    for _ in range(fill_shape_dims):
        attr = {"axes": [0]}
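A NumPy sketch (values are illustrative) of the idea this loop begins: the scalar fill value is expanded to rank len(dims) with all-1 dimensions (one Unsqueeze per output dimension), then tiled by dims, which reproduces Fill.

import numpy as np

dims = [2, 3]                            # hypothetical fill shape
value = np.array(7.0)                    # scalar fill value
expanded = value.reshape([1] * len(dims))
filled = np.tile(expanded, dims)
assert np.array_equal(filled, np.full(dims, 7.0))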
def version_10(cls, ctx, node, **kwargs):
    # T output = ReverseV2(T input, int32|int64 axis)
    # Implement the tensorflow ReverseV2 op using multiple ReverseSequence ops (one per axis)
    # and Transpose ops. We sort the axis vector (if non-empty) at the start. Each axis can
    # be reversed only once (in tf), so we can compute the transpose for each axis
    # (other than 0), feed the tensor to a ReverseSequence node and finally transpose again
    # to get back the original shape.
    axes_node = node.inputs[1]
    axes = axes_node.get_tensor_value(as_list=False)
    # Current support is for when axis is a 1D tensor.
    utils.make_sure(len(axes.shape) == 1,
                    "Currently no support for reverseV2 tensor axis")
    axes = axes.tolist()
    len_axes = len(axes)

    # Store input and output parameters of the ReverseV2 node.
    rv2_in_names = [node.input[0]]

    input_shape = ctx.get_shape(node.input[0])
    # Make sure the input shape is not None
    utils.make_sure(input_shape is not None, "shape of {} is None".format(node.input[0]))

    input_rank = len(input_shape)

    rv2_node_name = node.name
    # ReverseV2 has a single output.
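A NumPy sketch (not from the converter) of the transpose-reverse-transpose scheme described in the comment above, for a single axis; in the converted graph the middle step is a ReverseSequence node rather than np.flip.

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
axis = 2
perm = [axis] + [i for i in range(x.ndim) if i != axis]    # move the axis to reverse to the front
inv_perm = np.argsort(perm)                                # inverse permutation to restore the shape
restored = np.transpose(np.transpose(x, perm)[::-1], inv_perm)
assert np.array_equal(restored, np.flip(x, axis=axis))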
if outputs is None:
    outputs = [name + ":" + str(i) for i in range(output_count)]

output_count = len(outputs)
raw_attr = {}
onnx_attrs = []
for a, v in attr.items():
    if isinstance(v, AttributeProto):
        onnx_attrs.append(v)
    else:
        raw_attr[a] = v

n = self.get_node_by_name(name)
utils.make_sure(n is None, "name %s already exists in node: \n%s", name, n)
def matrixbandpart_op(ctx, node, name, args):
    # T output = MatrixBandPart(T input, int num_lower, int num_upper)
    # data-flow: first generate the mask matrix, then apply it with an element-wise mul op
    input_rank = len(ctx.get_shape(node.input[0]))
    utils.make_sure(input_rank == 2, error_msg="MatrixBandPart op: only rank 2 is supported")
    bandpart = [node.inputs[ind].get_tensor_value() for ind in [1, 2]]
    utils.make_sure(bandpart in [[-1, 0], [0, -1]], "only support Lower/Upper triangular for now")
    # method to generate the mask matrix: if the lower triangular part is needed, generate it column by column;
    # otherwise generate it row by row.
    axis, counter_axis, squeeze_axis = (1, 0, 2) if bandpart == [-1, 0] else (0, 1, 1)
    # 1: subgraph to implement tf.ones_like(input[:, 0]);
    # no need to worry about the dtype, because bool is needed anyway as Xor only supports bool
    node_name = utils.make_name("const_zero")
    const_zero = ctx.make_const(name=node_name, np_val=np.array([0]).astype(np.int32))
    first_col_or_row = ctx.make_node(op_type="Gather", inputs=[node.input[0], const_zero.output[0]],
                                     attr={"axis": axis})
    first_col_or_row_casted = ctx.make_node(op_type="Cast", inputs=first_col_or_row.output,
                                            attr={"to": onnx_pb.TensorProto.BOOL})
    # a "line" means one column or one row
    zero_line = ctx.make_node(op_type="Xor", inputs=first_col_or_row_casted.output * 2)
    one_line = ctx.make_node(op_type="Not", inputs=zero_line.output)
    # 2: "loop" to generate the mask matrix: generate the columns or rows of the matrix one by one
def _replace_node_with_const(node, graph, vals):
    utils.make_sure(len(node.output) == len(vals), "length of node outputs and const vals should be the same")
    for old_input, val in zip(node.output, vals):
        const_node = graph.make_const(utils.make_name("const_fold_opt"), val)
        graph.set_dtype(const_node.output[0], utils.map_numpy_to_onnx_dtype(val.dtype))
        graph.set_shape(const_node.output[0], val.shape)
        graph.replace_all_inputs(graph.get_nodes(), old_input, const_node.output[0])
    graph.remove_node(node.name)
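A hedged usage sketch of the helper above, assuming graph is the Graph being optimized and folded_node is a node whose output values have already been computed during constant folding (both names are placeholders, not from the original code).

import numpy as np

folded_vals = [np.array([1.0, 2.0], dtype=np.float32)]   # hypothetical pre-computed outputs
_replace_node_with_const(folded_node, graph, folded_vals)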
def version_1(cls, ctx, node, **kwargs):
    # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d.html
    # per the above link, the data format of the input tensor should be (batch, spatial_shape, remaining_shape)
    # and we only support 3D and 4D here, so the data formats are NHC and NHWC
    # the onnx op "DepthToSpace" does the same work on the input tensor except that it works on "C",
    # and it only supports NCHW
    # T out = BatchToSpaceND(T input, int32 block_shape, int32 crops)
    input_tensor = node.inputs[0]
    input_shape = ctx.get_shape(input_tensor.output[0])
    blocksize = node.inputs[1].get_tensor_value()
    crops = node.inputs[2].get_tensor_value()

    utils.make_sure(len(input_shape) in (4, 3),
                    "only supports 3D and 4D for now")
    utils.make_sure(len(blocksize) == 2 and blocksize[0] == blocksize[1],
                    "only support same blocksize at different dims")

    # NHWC to CNHW, so the onnx op will work on "N", which is the same axis tensorflow works on
    if len(input_shape) == 3:
        # automatically insert an Unsqueeze op if the input is 3D
        unsqz1 = ctx.make_node("Unsqueeze", input_tensor.output, {"axes": [3]})
        trans1 = ctx.make_node("Transpose", unsqz1.output, {"perm": [3, 0, 1, 2]})
    else:
        trans1 = ctx.make_node("Transpose", input_tensor.output, {"perm": [3, 0, 1, 2]})
    reorganize_node = ctx.make_node(node.type, trans1.output, attr={"blocksize": blocksize[0]})
    trans2 = ctx.make_node("Transpose", reorganize_node.output, {"perm": [1, 2, 3, 0]})

    # implement the crop logic; the data format is NHWC
    slice_axis = [1, 2]
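A small NumPy sketch (illustrative values) of the crop step being set up here: after the DepthToSpace-based rearrangement, the TF crops tensor simply trims the spatial axes 1 and 2 of the NHWC result.

import numpy as np

y = np.zeros((1, 6, 8, 3))              # hypothetical NHWC tensor after rearrangement
crops = [[1, 2], [0, 3]]                # [[crop_top, crop_bottom], [crop_left, crop_right]]
cropped = y[:, crops[0][0]:y.shape[1] - crops[0][1],
              crops[1][0]:y.shape[2] - crops[1][1], :]
assert cropped.shape == (1, 3, 5, 3)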