def version_7(cls, ctx, node, **kwargs):
# build a subgraph to implement one_hot; the idea comes from onehot_op
indices_name = node.input[1]
indices_shape = ctx.get_shape(indices_name)
if len(indices_shape) != 1:
# TODO: this works for rank=1 but tensorflow supports more than this.
# Same principle should work but we need to implement our own eye.
raise ValueError("onehot op: only rank1 is supported")
logit_name = node.input[0]
depth = ctx.get_shape(logit_name)[-1]
# if number of classes is unknown or too large
if depth == utils.ONNX_UNKNOWN_DIMENSION or depth > 20000:
sparse_softmax_cross_entropy_with_logits_op_by_gathernd(ctx, node, **kwargs)
return
logit_dtype = ctx.get_dtype(logit_name)
utils.make_sure(logit_dtype, "Dtype of {} is None".format(logit_name))
dtype = utils.map_onnx_to_numpy_type(logit_dtype)
eye = np.eye(depth).astype(dtype)
const_name = utils.make_name("const_eye")
const_eye = ctx.make_const(name=const_name, np_val=eye)
onehot = ctx.make_node(op_type="Gather", inputs=[const_eye.output[0], indices_name], attr={"axis": 0})
log_softmax = ctx.make_node(op_type="LogSoftmax", inputs=[logit_name])
# implement tf.multiply(np.float32(-1.0), tf.reduce_sum(tf.multiply(one_hot, log_softmax), axis=1))
mul1 = ctx.make_node(op_type="Mul", inputs=[onehot.output[0], log_softmax.output[0]])
reduce_sum = ctx.make_node(op_type="ReduceSum", inputs=[mul1.output[0]], attr={"axes": [1]})
const_name = utils.make_name("const_negative_one")
const_negative_one = ctx.make_const(name=const_name, np_val=np.array(-1).astype(dtype))
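# Illustration (not part of the converter): the subgraph above mirrors this
# numpy computation of sparse softmax cross entropy for rank-1 labels.
# `logits` and `labels` are hypothetical example inputs.
import numpy as np

def _sparse_softmax_ce_reference(logits, labels):
    # one-hot rows via an identity matrix, the same trick as Gather on const_eye
    onehot = np.eye(logits.shape[-1])[labels]
    # log-softmax over the class axis (no max-subtraction here, sketch only)
    log_softmax = logits - np.log(np.sum(np.exp(logits), axis=-1, keepdims=True))
    # loss per example: -sum(one_hot * log_softmax) along axis 1
    return -np.sum(onehot * log_softmax, axis=1)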
def _process_c_or_h_init_nodes(self, initializer_input_id, rnn_props):
# todo: remove this once the Fill op is supported
fill_ch_init_node = self._workaround_fill_ch_init_node(initializer_input_id, rnn_props)
if fill_ch_init_node:
return fill_ch_init_node.output[0]
node = self.g.get_node_by_output(initializer_input_id)
if node.is_const():
val = node.get_tensor_value()
initial_name = utils.make_name("Const")
new_val = np.expand_dims(val, axis=0)
const_node = self.g.make_const(initial_name, new_val)
return const_node.output[0]
unsqueeze_node = self.g.make_node("Unsqueeze", [initializer_input_id], attr={"axes": [0]})
self.g.replace_all_inputs(self.g.get_nodes(), initializer_input_id, unsqueeze_node.output[0])
self.all_nodes.append(unsqueeze_node)
return unsqueeze_node.output[0]
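# Illustration (not part of the converter): ONNX LSTM/GRU expect initial_h /
# initial_c with shape [num_directions, batch_size, hidden_size], while the TF
# initial state is [batch_size, hidden_size]; the expand_dims / Unsqueeze on
# axis 0 above adds the missing num_directions dimension. Example values are
# hypothetical.
import numpy as np

tf_initial_state = np.zeros((8, 32), dtype=np.float32)       # [batch, hidden]
onnx_initial_state = np.expand_dims(tf_initial_state, axis=0)
assert onnx_initial_state.shape == (1, 8, 32)                 # [num_directions, batch, hidden]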
def _process_init_nodes(self, initializer_input_id, rnn_props):
# copy from lstm_rewriter
# todo: remove this once the Fill op is supported
fill_ch_init_node = self._workaround_fill_ch_init_node(initializer_input_id, rnn_props)
if fill_ch_init_node:
return fill_ch_init_node.output[0]
node = self.g.get_node_by_output(initializer_input_id)
if node.is_const():
val = node.get_tensor_value()
initial_name = utils.make_name("Const")
new_val = np.expand_dims(val, axis=0)
const_node = self.g.make_const(initial_name, new_val)
return const_node.output[0]
unsqueeze_node = self.g.make_node("Unsqueeze", [initializer_input_id], attr={"axes": [0]})
self.g.replace_all_inputs(self.g.get_nodes(), initializer_input_id, unsqueeze_node.output[0])
self.all_nodes.append(unsqueeze_node)
return unsqueeze_node.output[0]
indices_size = ctx.make_node("Size", [indices_name])
indices_unsqueeze = ctx.make_node("Unsqueeze", [indices_name], attr={"axes": [1]})
zero_const = ctx.make_const(utils.make_name("zero"), np.array(0, dtype=np.int64))
one_const = ctx.make_const(utils.make_name("one"), np.array(1, dtype=np.int64))
id_name = utils.make_name("sparse_softmax_id")
id_output = utils.port_name(id_name)
controlflow.make_range(ctx, zero_const.output[0], indices_size.output[0], one_const.output[0],
id_output, id_name, shape=[-1], dtype=TensorProto.INT64)
id_unsqueeze = ctx.make_node("Unsqueeze", [id_output], attr={"axes": [1]})
indices_with_id = ctx.make_node("Concat",
[id_unsqueeze.output[0], indices_unsqueeze.output[0]],
attr={"axis": 1})
log_softmax = ctx.make_node(op_type="LogSoftmax",
inputs=[logit_name], dtypes=[logit_dtype], shapes=[logit_shape])
gathernd_name = utils.make_name("sparse_softmax_gathernd")
gathernd_output = utils.port_name(gathernd_name)
tensor.make_gathernd(ctx, log_softmax.output[0], indices_with_id.output[0], gathernd_output,
gathernd_name, logit_dtype, [logit_shape], [logit_dtype])
const_name = utils.make_name("const_negative_one")
const_negative_one = ctx.make_const(const_name, np.array(-1).astype(utils.map_onnx_to_numpy_type(logit_dtype)))
mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], gathernd_output])
shapes = node.output_shapes
dtypes = node.output_dtypes
ctx.remove_node(node.name)
ctx.make_node(op_type="Squeeze",
inputs=[mul2.output[0]], outputs=[node.output[0]],
attr={"axes": [1]}, shapes=[shapes[0]], dtypes=[dtypes[0]])
def version_1(cls, ctx, node, **kwargs):
"""Sign op."""
# T sign = Sign(T Input)
node_dtype = ctx.get_dtype(node.output[0])
utils.make_sure(node_dtype, "Dtype of {} is None".format(node.name))
if node_dtype in [onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]:
raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now")
zero_name = utils.make_name("{}_zero".format(node.name))
ctx.make_const(zero_name, np.array(0, dtype=np.float32))
if node_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]:
cast_node_0 = ctx.make_node("Cast", [node.input[0]], {"to": onnx_pb.TensorProto.FLOAT})
greater_node = ctx.make_node("Greater", [cast_node_0.output[0], zero_name])
less_node = ctx.make_node("Less", [cast_node_0.output[0], zero_name])
else:
greater_node = ctx.make_node("Greater", [node.input[0], zero_name])
less_node = ctx.make_node("Less", [node.input[0], zero_name])
cast_node_1 = ctx.make_node("Cast", [greater_node.output[0]], {"to": node_dtype})
cast_node_2 = ctx.make_node("Cast", [less_node.output[0]], {"to": node_dtype})
shapes = node.output_shapes
dtypes = node.output_dtypes
ctx.remove_node(node.name)
ctx.make_node("Sub", [cast_node_1.output[0], cast_node_2.output[0]], outputs=[node.output[0]],
shapes=shapes, dtypes=dtypes)
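# Illustration (not part of the converter): the subgraph above implements
# sign(x) = (x > 0) - (x < 0), i.e. 1 for positive, -1 for negative, 0 for zero.
# A quick numpy check with hypothetical inputs:
import numpy as np

x = np.array([-3.0, 0.0, 2.5], dtype=np.float32)
sign = (x > 0).astype(np.float32) - (x < 0).astype(np.float32)
assert np.array_equal(sign, np.sign(x))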
def _merge_shapes_for_tf(shape1, shape2):
"""
Merge 2 shapes, return merged shape, set unknown for dims with different values.
Raise exception for mismatch.
"""
if shape1 is None:
return shape2
if shape2 is None:
return shape1
utils.make_sure(utils.is_list_or_tuple(shape1), "invalid type for shape1")
utils.make_sure(utils.is_list_or_tuple(shape2), "invalid type for shape2")
utils.make_sure(len(shape1) == len(shape2), "shapes rank mismatch: shape1=%s, shape2=%s", shape1, shape2)
merged = []
for d1, d2 in zip(shape1, shape2):
d = d1
if d1 is None:
d = d2
elif d2 is not None and d1 != d2:
# both dims are known but disagree; None means unknown in tensorflow
d = None
merged.append(d)
return merged
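# Illustration (not part of the converter): expected behaviour of
# _merge_shapes_for_tf, assuming the corrected merge above (None means
# "unknown", as in TensorFlow):
#   _merge_shapes_for_tf([None, 3], [2, 3]) -> [2, 3]     (unknown dim filled in)
#   _merge_shapes_for_tf([2, 3], [4, 3])    -> [None, 3]  (conflicting dims become unknown)
#   _merge_shapes_for_tf(None, [2, 3])      -> [2, 3]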
def get_attr_int(self, name):
"""Get attribute value as int."""
attr_int = self.get_attr_value(name)
utils.make_sure(
attr_int is not None and isinstance(attr_int, int),
"attribute %s is None", name
)
return attr_int
if not shape_op or shape_op.type != "Shape":
return False
return set_shape_from_input(shape_op.inputs[0], op.outputs[0])
if op.type == "Gather":
# see the following link for how the output shape is inferred
# https://www.tensorflow.org/api_docs/python/tf/gather
shape_params = utils.get_tf_tensor_shape(op.inputs[0])
shape_indices = utils.get_tf_tensor_shape(op.inputs[1])
# plain Gather takes only 2 inputs; GatherV2 adds a third input for the axis
# https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/gather.html
if len(op.inputs) == 3:
axis_op = op.inputs[2].op
if not utils.is_tf_const_op(axis_op):
return False
axis = utils.get_tf_const_value(axis_op)
else:
axis = 0
shape = shape_params[:axis] + shape_indices + shape_params[axis + 1:]
op.outputs[0].set_shape(shape)
return True
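# Illustration (not part of the converter): tf.gather's output shape is
# params.shape[:axis] + indices.shape + params.shape[axis+1:], which is exactly
# what the concatenation above computes. For hypothetical shapes
# params: [5, 6, 7], indices: [2, 3], axis=1, the gathered output is [5, 2, 3, 7].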
if op.type in ["All", "Any", "Max", "Min"]:
axis_op = op.inputs[1].op
if not utils.is_tf_const_op(axis_op):
return False
axis = utils.get_tf_const_value(axis_op)
if not isinstance(axis, list):
axis = [axis]
keep_dims = op.get_attr("keep_dims")
shape = utils.get_tf_tensor_shape(op.inputs[0])
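# Illustration (not part of the converter): for these reductions the output
# keeps its rank when keep_dims is true (reduced axes become 1) and drops the
# reduced axes otherwise. A numpy check with a hypothetical input:
import numpy as np

x = np.zeros((2, 3, 4))
assert np.max(x, axis=1, keepdims=True).shape == (2, 1, 4)
assert np.max(x, axis=1, keepdims=False).shape == (2, 4)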
body_nodes = set(cell_g_info.nodes + cond_g_info.nodes)
body_outputs = cond_g_info.outputs + cell_g_info.outputs
for out_tensor_value_info in body_outputs:
shape = out_tensor_value_info.shape
utils.make_sure(
shape is not None,
"Conversion of Loop requries output shape [{}] exists".format(out_tensor_value_info.id)
)
out_tensor_value_info.shape = utils.create_vague_shape_like(shape)
loop_body_g = LoopRewriterBase.construct_graph_from_nodes(self.g, body_nodes, body_outputs)
# create loop body graph inputs
loop_body_g.add_graph_input(utils.make_name("i"), TensorProto.INT64, ())
loop_body_g.add_graph_input(utils.make_name("cond"), TensorProto.BOOL, ())
for i, tensor_value_info in enumerate(loop_props.state_inputs):
input_name = tensor_value_info.id
if input_name is None:
# if the variable is not used in the body graph, create a fake input
# with the same type and shape as its corresponding output.
out_tensor_value_info = loop_props.state_outputs[i]
dtype = out_tensor_value_info.dtype
shape = out_tensor_value_info.shape
input_name = utils.make_name("unused_state_input_")
else:
dtype = tensor_value_info.dtype
shape = tensor_value_info.shape
loop_body_g.add_graph_input(input_name, dtype, utils.create_vague_shape_like(shape))
for input_ta in loop_props.tensor_array_inputs:
func = func_map.get(op.type)
if func is None:
continue
try:
inputs = []
for node in op.inputs:
if not node.is_const():
break
inputs.append(node.get_tensor_value(as_list=False))
logger.debug("op name %s, %s, %s", op.name, len(op.input), len(inputs))
if inputs and len(op.input) == len(inputs):
logger.info("folding node type=%s, name=%s" % (op.type, op.name))
if op.type == "Cast":
dst = op.get_attr_int("to")
np_type = tf2onnx.utils.map_onnx_to_numpy_type(dst)
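# np.cast[np_type](x) casts to the requested numpy dtype, roughly equivalent to
# np.asarray(x).astype(np_type); note the np.cast alias is deprecated in newer
# NumPy releases.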
val = np.cast[np_type](*inputs)
elif op.type == "ConcatV2":
axis = inputs[-1]
values = inputs[:-1]
val = func(tuple(values), axis)
elif op.type == "ListDiff":
out_type = op.get_attr_int("out_idx")
np_type = tf2onnx.utils.map_onnx_to_numpy_type(out_type)
val = func(*inputs)
val = val.astype(np_type)
elif op.type in ["Pack"]:
# handle ops that need input array and axis
axis = op.get_attr_int("axis")
val = func(inputs, axis=axis)
elif op.type == "Range":
dtype = op.get_attr_int("Tidx")