# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this run of statements continues a test method whose `def`,
# `self`, `context`, `mb`, and `nb` setup are outside this view — confirm
# against the full file. It builds two nodes ('bar' is set up above, 'bar2'
# below), each with a float initializer, and checks that the model builder
# prefixes each initializer name with its node's name.
test_array = [1,2,3]
tensor = model_util.make_tensor('classes', onnx_proto.TensorProto.FLOAT, [1,len(test_array)], test_array)
nb.add_initializer(tensor)
node = nb.make_node()
# Second node, chained off the first node's 'Output'.
nb2 = NodeBuilder(context, 'bar2')
nb2.add_input('Output')
nb2.add_output('Output2')
tensor2 = model_util.make_tensor('classes2', onnx_proto.TensorProto.FLOAT, [1,len(test_array)], test_array)
nb2.add_initializer(tensor2)
node2 = nb2.make_node()
# Assemble the graph: both nodes, both initializer sets, one graph input/output.
mb.add_nodes([node.onnx_node, node2.onnx_node])
mb.add_initializers(node.initializers)
mb.add_initializers(node2.initializers)
mb.add_inputs([model_util.make_tensor_value_info('Input', onnx_proto.TensorProto.FLOAT, [1])])
mb.add_outputs([model_util.make_tensor_value_info('Output', onnx_proto.TensorProto.FLOAT, [1])])
model = mb.make_model()
# Initializer names are expected to be '<node name>_<tensor name>'.
self.assertEqual(len(model.graph.initializer), 2)
self.assertEqual(model.graph.initializer[0].name, 'bar_classes')
self.assertEqual(model.graph.initializer[1].name, 'bar2_classes2')
# NOTE(review): this looks like the body of a test method whose enclosing
# `def` (and `self`) is outside this view — confirm in the full file.
# Builds a single node 'bar' with one float initializer 'classes' and checks
# that the resulting model prefixes the initializer name with the node name.
context = ConvertContext()
# create nodes with initializers
mb = ModelBuilder()
nb = NodeBuilder(context, 'bar')
nb.add_input('Input')
nb.add_output('Output')
test_array = [1,2,3]
tensor = model_util.make_tensor('classes', onnx_proto.TensorProto.FLOAT, [1,len(test_array)], test_array)
nb.add_initializer(tensor)
node = nb.make_node()
# Assemble a one-node model: the node, its initializers, one input/output.
mb.add_nodes([node.onnx_node])
mb.add_initializers(node.initializers)
mb.add_inputs([model_util.make_tensor_value_info('Input', onnx_proto.TensorProto.FLOAT, [1])])
mb.add_outputs([model_util.make_tensor_value_info('Output', onnx_proto.TensorProto.FLOAT, [1])])
model = mb.make_model()
# Expect exactly one initializer, named '<node name>_<tensor name>'.
self.assertEqual(len(model.graph.initializer), 1)
self.assertEqual(model.graph.initializer[0].name, 'bar_classes')
# NOTE(review): continues the statement sequence above — `nb`, `mb`,
# `context`, and `test_array` come from earlier lines of the same method
# (whose `def` is outside this view). A second initializer is added to the
# first node and a second node 'bar2' is created, then the model is rebuilt
# and both prefixed initializer names are checked.
tensor = model_util.make_tensor('classes', onnx_proto.TensorProto.FLOAT, [1,len(test_array)], test_array)
nb.add_initializer(tensor)
node = nb.make_node()
# Second node, consuming the first node's 'Output'.
nb2 = NodeBuilder(context, 'bar2')
nb2.add_input('Output')
nb2.add_output('Output2')
tensor2 = model_util.make_tensor('classes2', onnx_proto.TensorProto.FLOAT, [1,len(test_array)], test_array)
nb2.add_initializer(tensor2)
node2 = nb2.make_node()
mb.add_nodes([node.onnx_node, node2.onnx_node])
mb.add_initializers(node.initializers)
mb.add_initializers(node2.initializers)
mb.add_inputs([model_util.make_tensor_value_info('Input', onnx_proto.TensorProto.FLOAT, [1])])
mb.add_outputs([model_util.make_tensor_value_info('Output', onnx_proto.TensorProto.FLOAT, [1])])
model = mb.make_model()
# Both initializers present, each prefixed with its node's name.
self.assertEqual(len(model.graph.initializer), 2)
self.assertEqual(model.graph.initializer[0].name, 'bar_classes')
self.assertEqual(model.graph.initializer[1].name, 'bar2_classes2')
def convert(context, sk_node, inputs):
    """Convert a scikit-learn normalizer into an 'ai.onnx.ml' Normalizer node.

    :param context: conversion context used by NodeBuilder for unique naming.
    :param sk_node: fitted sklearn model; only its ``norm`` attribute is read.
    :param inputs: ONNX value infos; inputs[0] must carry tensor shape info.
    :return: the built node (result of ``nb.make_node()``).
    :raises RuntimeError: if ``sk_node.norm`` is not one of 'max', 'l1', 'l2'.
    :raises ValueError: if inputs[0] lacks tensor-type shape information.
    """
    nb = NodeBuilder(context, "Normalizer", op_domain='ai.onnx.ml')
    norm_enum = {'max': 'MAX', 'l1': 'L1', 'l2': 'L2'}
    # Membership test on the dict itself — no need for .keys().
    if sk_node.norm in norm_enum:
        nb.add_attribute('norm', norm_enum[sk_node.norm])
    else:
        raise RuntimeError("Invalid norm:" + sk_node.norm)
    nb.extend_inputs(inputs)
    try:
        output_dim = [d.dim_value for d in inputs[0].type.tensor_type.shape.dim]
    except AttributeError as e:
        # Chain the original exception so the root cause is preserved.
        raise ValueError('Invalid or missing input dimension for Normalizer.') from e
    # Output keeps the input's dimensions; normalization is element-preserving.
    nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, output_dim))
    return nb.make_node()
def add_normalizer(input_name, output_type, norm, context):
    """Create a Normalizer node that consumes *input_name*.

    A fresh output name is derived from *input_name* via the conversion
    context, and a value info of *output_type* is created for it.

    :return: tuple of (normalizer node, unique output name).
    """
    unique_output = context.get_unique_name(input_name)
    output_info = model_util.make_tensor_value_info(unique_output, output_type)
    normalizer_node = model_util.make_normalizer_node(context, input_name, output_info, norm)
    return normalizer_node, unique_output
# NOTE(review): fragment of a tree-ensemble regressor converter — `attr_pairs`,
# `sk_node`, `inputs`, `context`, and `_add_tree_to_attribute_pairs` are
# defined outside this view; confirm in the full file. Indentation appears
# stripped by extraction, so loop/body structure here is as originally written.
attr_pairs['n_targets'] = sk_node.n_outputs_
# Uniform weighting: each estimator contributes 1/n_estimators to the sum.
tree_weight = 1. / sk_node.n_estimators
for i in range(sk_node.n_estimators):
tree = sk_node.estimators_[i].tree_
tree_id = i
_add_tree_to_attribute_pairs(attr_pairs, False, tree, tree_id, tree_weight, 0, False)
# Build the ONNX ML TreeEnsembleRegressor node and copy every collected
# attribute onto it.
nb = NodeBuilder(context, "TreeEnsembleRegressor", op_domain='ai.onnx.ml')
for k, v in attr_pairs.items():
nb.add_attribute(k, v)
nb.extend_inputs(inputs)
# Output shape: one row, one column per regression target.
output_dim = [1, sk_node.n_outputs_]
nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, output_dim))
return nb.make_node()
# NOTE(review): fragment of a linear-classifier converter — `nb`, `multi_class`,
# `classes`, `sk_node`, `inputs`, `context`, `utils`, and `add_zipmap` come from
# outside this view; confirm in the full file. Indentation appears stripped by
# extraction, so branch/body structure here is as originally written.
if multi_class == 2:
nb.add_attribute('post_transform', 'SOFTMAX')
else:
nb.add_attribute('post_transform', 'LOGISTIC')
# String labels and integer labels use different node attributes, and the
# label output tensor gets a matching element type.
if utils.is_string_type(classes):
class_labels = utils.cast_list(str, classes)
nb.add_attribute('classlabels_strings', class_labels)
output_type = onnx_proto.TensorProto.STRING
else:
class_labels = utils.cast_list(int, classes)
nb.add_attribute('classlabels_ints', class_labels)
output_type = onnx_proto.TensorProto.INT64
nb.extend_inputs(inputs)
# First output: the predicted label as a 1x1 tensor, also registered as a
# model-level output.
output_y = model_util.make_tensor_value_info(nb.name, output_type, [1, 1])
nb.add_output(output_y)
context.add_output(output_y)
# Second output: class probabilities. NOTE(review): add_output receives a
# bare name string here (not a value info) — presumably NodeBuilder accepts
# both forms; verify against its definition.
prob_input = context.get_unique_name('classProbability')
nb.add_output(prob_input)
output_name = prob_input
appended_node_normalizer = None
# Add normalizer in the case of multi-class.
if multi_class > 0 and sk_node.__class__.__name__ != 'LinearSVC':
appended_node_normalizer, output_name = add_normalizer(prob_input, output_type, "L1", context)
# Add a ZipMap to handle the map output.
if len(classes) > 2 or sk_node.__class__.__name__ != 'LinearSVC':
appended_node_zipmap = add_zipmap(output_name, output_type, class_labels, context)