# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def ConcatONNXExporter(op_def, shape_dict, ws):
    """Export a Concat operator to an ONNX node proto.

    Copies the integer 'axis' argument (when present on the Dragon op
    definition) onto the generated node's attributes.
    """
    node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
    axis_args = (a for a in op_def.arg if a.name == 'axis')
    for axis_arg in axis_args:
        node_proto.attribute.append(make_attribute('axis', axis_arg.i))
    # NOTE(review): const_tensors is computed but discarded here, while
    # sibling exporters return it -- confirm this is intentional.
    return node_proto, None
def set_reduction_attrs(node, param):
    """Attach the ONNX reduction attributes to *node*.

    Reads ``axes`` and ``keep_dims`` from *param* and stores them on the
    node as the standard 'axes' and 'keepdims' ONNX attributes.
    """
    node.attribute.extend([
        onnx.helper.make_attribute("axes", param.axes),
        onnx.helper.make_attribute("keepdims", param.keep_dims),
    ])
make_attribute('to', TensorProto.INT32)])
elif arg.s.upper() == b'INT64':
node_proto.attribute.extend([
make_attribute('to', TensorProto.INT64)])
elif arg.s.upper() == b'FLOAT16':
node_proto.attribute.extend([
make_attribute('to', TensorProto.FLOAT16)])
if arg.s.upper() == b'FLOAT32':
node_proto.attribute.extend([
make_attribute('to', TensorProto.FLOAT)])
elif arg.s.upper() == b'FLOAT64':
node_proto.attribute.extend([
make_attribute('to', TensorProto.DOUBLE)])
else:
node_proto.attribute.extend([
make_attribute('to', TensorProto.UNDEFINED)])
return node_proto, const_tensors
def ProposalONNXExporter(op_def, shape_dict, ws):
node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
node_proto.op_type = 'ATen' # Template
node_proto.attribute.extend([make_attribute('op_type', 'Proposal')])
for arg in op_def.arg:
if arg.name == 'strides':
node_proto.attribute.extend([
make_attribute('strides', arg.ints)])
elif arg.name == 'ratios':
node_proto.attribute.extend([
make_attribute('ratios', arg.floats)])
elif arg.name == 'scales':
node_proto.attribute.extend([
make_attribute('scales', arg.floats)])
elif arg.name == 'pre_nms_top_n':
node_proto.attribute.extend([
make_attribute('pre_nms_top_n', arg.i)])
elif arg.name == 'post_nms_top_n':
node_proto.attribute.extend([
last_node = nodes[slice_chain[-1]]
input_name = first_node.input[0]
output_name = last_node.output[0]
processed = -1
if output_name in input_refs: # 0, [1...]
new_input_name = first_node.output[0] if merged_slice else input_name
processed = skip_node_forward(ret_nodes, output_name,
new_input_name, input_refs)
if processed > 0:
if merged_slice:
remain_idx = slice_chain[0]
remove_chain = slice_chain[1:]
slice_node = ret_nodes[remain_idx]
for attr in slice_node.attribute:
attr.CopyFrom(
make_attribute(attr.name, attrs[attr.name]))
logger.debug('merged slice chain %s -> %s%s -> %s',
input_name, remain_idx, remove_chain,
output_name)
else:
remove_chain = slice_chain
if processed < 0 and input_name in output_refs:
new_output_name = last_node.input[0] if merged_slice else output_name
processed = skip_node_backward(ret_nodes, input_name,
new_output_name, output_refs)
if processed > 0:
if merged_slice:
remain_idx = slice_chain[-1]
remove_chain = slice_chain[:-1]
slice_node = ret_nodes[remain_idx]
for attr in slice_node.attribute:
def set_attr(self, name, value):
    """Build an ONNX attribute proto for *name*/*value* and store it
    under *name* in this object's attribute map."""
    attr_proto = helper.make_attribute(name, value)
    self.attr[name] = attr_proto
# Weight data should be the second input for convolution
if len(func.input) < 2:
raise ValueError(
"Weight input is missing for convolution {}"
.format(func.name))
weight = func.input[1]
weight_var = [v for v in variables if v.name == weight]
if len(weight_var) != 1:
raise ValueError(
"No weight input was found, or multiple weight inputs were found"
" for convolution {} where there should be only one."
.format(func.name))
weight_shape = weight_var[0].shape
# The base axis for weights is the next axis from the data's base axis
weight_base = cp.base_axis + 1
k = onnx.helper.make_attribute("kernel_shape",
weight_shape.dim[weight_base:])
d = onnx.helper.make_attribute("dilations", cp.dilation.dim)
s = onnx.helper.make_attribute("strides", cp.stride.dim)
p = onnx.helper.make_attribute("pads", cp.pad.dim[:] * 2)
g = onnx.helper.make_attribute("group", cp.group)
n.attribute.extend([k, d, s, p, g])
nl.append(n)
elif func.type == "GlobalAveragePooling":
# We wipeout the node name to avoid a bug?
# that occurs when we use a GlobalAveragePooling node with a name
# "Conv" or "Pool" contained.
# Caffe2 issue is here:
# https://github.com/caffe2/caffe2/issues/1971
# Because a GlobalAveragePooling operator does not contain a kernel, we get an error at the
# following code if we have a specific name.
# https://github.com/caffe2/caffe2/blob/master/caffe2/operators/conv_pool_op_base.h#L167
make_attribute('nms_thresh', arg.f)])
elif arg.name == 'min_size':
node_proto.attribute.extend([
make_attribute('min_size', arg.i)])
elif arg.name == 'min_level':
node_proto.attribute.extend([
make_attribute('min_level', arg.i)])
elif arg.name == 'max_level':
node_proto.attribute.extend([
make_attribute('max_level', arg.i)])
elif arg.name == 'canonical_scale':
node_proto.attribute.extend([
make_attribute('canonical_scale', arg.i)])
elif arg.name == 'canonical_level':
node_proto.attribute.extend([
make_attribute('canonical_level', arg.i)])
return node_proto, const_tensors
def AffineONNXExporter(op_def, shape_dict, ws):
    """Export an Affine operator as an 'ATen' template ONNX node.

    Tags the node with op_type='Affine' and forwards the integer
    'axis' and 'num_axes' arguments from the Dragon op definition.
    """
    node_proto, const_tensors = CommonONNXExporter(op_def, shape_dict)
    node_proto.op_type = 'ATen'  # Template
    node_proto.attribute.extend([make_attribute('op_type', 'Affine')])
    forwarded = ('axis', 'num_axes')
    for arg in op_def.arg:
        if arg.name in forwarded:
            node_proto.attribute.append(make_attribute(arg.name, arg.i))
    return node_proto, const_tensors
.format(func.name))
weight = func.input[1]
weight_var = [v for v in variables if v.name == weight]
if len(weight_var) != 1:
raise ValueError(
"No weight input was found, or multiple weight inputs were found"
" for convolution {} where there should be only one."
.format(func.name))
weight_shape = weight_var[0].shape
# The base axis for weights is the next axis from the data's base axis
weight_base = cp.base_axis + 1
k = onnx.helper.make_attribute("kernel_shape",
weight_shape.dim[weight_base:])
d = onnx.helper.make_attribute("dilations", cp.dilation.dim)
s = onnx.helper.make_attribute("strides", cp.stride.dim)
p = onnx.helper.make_attribute("pads", cp.pad.dim[:] * 2)
g = onnx.helper.make_attribute("group", cp.group)
n.attribute.extend([k, d, s, p, g])
nl.append(n)
elif func.type == "GlobalAveragePooling":
# We wipeout the node name to avoid a bug?
# that occurs when we use a GlobalAveragePooling node with a name
# "Conv" or "Pool" contained.
# Caffe2 issue is here:
# https://github.com/caffe2/caffe2/issues/1971
# Because a GlobalAveragePooling operator does not contain a kernel, we get an error at the
# following code if we have a specific name.
# https://github.com/caffe2/caffe2/blob/master/caffe2/operators/conv_pool_op_base.h#L167
# The above caffe2 code should be checking the node's operator name and not the node's name.
n.name = ""
nl.append(n)
elif func.type == "Softmax":