def test_add_inputs(self):
    context = ConvertContext()
    nb = NodeBuilder(context, "foo")
    nb.add_input('test')
    nb.add_empty_input()
    nb.add_input(model_util.make_tensor_value_info('value_test', onnx_proto.TensorProto.FLOAT, [1, 3]))
    test_array = [1, 2, 3]
    init = model_util.make_tensor('init', onnx_proto.TensorProto.FLOAT, [1, len(test_array)], test_array)
    nb.add_initializer(init)
    value = model_util.make_tensor('value', onnx_proto.TensorProto.FLOAT, [1, len(test_array)], test_array)
    nb.add_value(value)
    node = nb.make_node()
    input_names = node.input_names
    self.assertEqual(len(input_names), 5)
    # Confirm the names appear in the order they were added; initializers and
    # values are prefixed with the builder's name ("foo").
    expected_names = ['test', '', 'value_test', 'foo_init', 'foo_value']
    self.assertEqual(input_names, expected_names)
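# Side note (a sketch with stock onnx.helper, independent of the NodeBuilder
# API above): in ONNX, an empty-string input name marks an omitted optional
# input, which is what add_empty_input() produces. For example, Clip without
# a 'min' input:
from onnx import helper
clip_node = helper.make_node('Clip', ['x', '', 'max_val'], ['y'])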
def test_optimizer(self):
    val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
    nodes = [
        helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
            name='const0',
            data_type=onnx_proto.TensorProto.FLOAT,
            dims=val.shape,
            vals=val.flatten().astype(float))),
        helper.make_node('Identity', ['const1'], ['identity1']),
        helper.make_node('Identity', ['identity1'], ['identity2']),
        helper.make_node('Max', ['input1', 'identity2'], ['max0']),
        # These two transposes cancel out (perm [0, 2, 3, 1] followed by
        # [0, 3, 1, 2] is the identity), which an optimizer should eliminate.
        helper.make_node('Transpose', ['max0'], ['transpose0'], perm=[0, 2, 3, 1]),
        helper.make_node('Transpose', ['transpose0'], ['transpose1'], perm=[0, 3, 1, 2]),
        helper.make_node('Relu', ['transpose1'], ['output0']),
    ]
    input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
    output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
    graph = helper.make_graph(nodes, 'test0', [input0], [output0])
    model = helper.make_model(graph)
    self.assertIsNotNone(model)
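# Optional follow-up sketch (not part of the original test): the hand-built
# model can be validated and serialized with the standard onnx APIs.
import onnx
onnx.checker.check_model(model)
onnx.save(model, 'test0.onnx')  # path chosen here for illustration only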
tensor_w_name = scope.get_unique_variable_name('tensor_w')
W = op.get_weights()[0].T
container.add_initializer(tensor_w_name, onnx_proto.TensorProto.FLOAT,
                          [1, 3 * hidden_size, input_size], W.flatten())
gru_input_names.append(tensor_w_name)
tensor_r_name = scope.get_unique_variable_name('tensor_r')
R = op.get_weights()[1].T
container.add_initializer(tensor_r_name, onnx_proto.TensorProto.FLOAT,
                          [1, 3 * hidden_size, hidden_size], R.flatten())
gru_input_names.append(tensor_r_name)
# get_weights() only has a third entry when the layer uses a bias, so guard the access.
B = op.get_weights()[2] if op.use_bias else None
if op.use_bias and len(B) > 0:
    tensor_b_name = scope.get_unique_variable_name('tensor_b')
    # ONNX GRU expects Wb and Rb concatenated; Keras has a single bias here,
    # so pad with zeros to reach 6 * hidden_size.
    B = np.concatenate([B, np.zeros(3 * hidden_size)])
    container.add_initializer(tensor_b_name, onnx_proto.TensorProto.FLOAT, [1, 6 * hidden_size], B.flatten())
    gru_input_names.append(tensor_b_name)
else:
    gru_input_names.append('')
# sequence_lens: optional input, left empty
gru_input_names.append('')
# TODO: figure out the Keras way of providing initial_h
gru_input_names.append('')
activation_types = []
alphas = []
betas = []
for (activation_type, alpha, beta) in \
        [extract_recurrent_activation(op.recurrent_activation), extract_recurrent_activation(op.activation)]:
    activation_types.append(activation_type.encode('utf-8'))
    if alpha is not None:
        alphas.append(alpha)
    if beta is not None:
        betas.append(beta)
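# Hedged continuation sketch: emit the GRU node itself. The output names are
# invented for illustration; attribute names follow the ONNX GRU spec, and
# attributes are passed to container.add_node as keyword arguments.
gru_y_name = scope.get_unique_variable_name('gru_y')
gru_h_name = scope.get_unique_variable_name('gru_h')
container.add_node('GRU', gru_input_names, [gru_y_name, gru_h_name],
                   name=operator.full_name, hidden_size=hidden_size,
                   activations=activation_types,
                   activation_alpha=alphas, activation_beta=betas)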
    pre_nb3.add_input(h_init_name)
    pre_nb3.add_output(h_init_reshaped_name)
    nb.add_input(h_init_reshaped_name)
else:
    nb.add_empty_input()
# initial_c
if len(inputs) > 2:
    pre_nb4 = NodeBuilder(context, 'Concat')
    builder_list.append(pre_nb4)
    pre_nb4.add_attribute('axis', 0)
    zero_initializer = model_util.make_tensor('c_init', onnx_proto.TensorProto.FLOAT,
                                              [1, hidden_size], [0.] * hidden_size)
    pre_nb4.add_initializer(zero_initializer, inputs[2])
    zero_initializer = model_util.make_tensor('c_init_rev', onnx_proto.TensorProto.FLOAT,
                                              [1, hidden_size], [0.] * hidden_size)
    pre_nb4.add_initializer(zero_initializer, inputs[4])
    c_init_name = context.get_unique_name('c_init')
    pre_nb4.add_output(c_init_name)
    # Reshape to the [num_directions, batch, hidden_size] layout ONNX expects
    # for recurrent initial states (2 directions for a bidirectional layer).
    pre_nb5 = NodeBuilder(context, 'Reshape')
    builder_list.append(pre_nb5)
    pre_nb5.add_attribute('shape', [2, 1, hidden_size])
    pre_nb5.add_input(c_init_name)
    c_init_reshaped_name = context.get_unique_name('c_init_reshaped')
    pre_nb5.add_output(c_init_reshaped_name)
    nb.add_input(c_init_reshaped_name)
else:
    nb.add_empty_input()
def create_scaler(input, output_name, scale, offset, context):
    nb = NodeBuilder(context, "Scaler", op_domain='ai.onnx.ml')
    nb.add_attribute('scale', [scale])
    nb.add_attribute('offset', [offset])
    nb.add_input(input)
    # Carry the input's dimensions through to the output tensor
    output_shape = [x.dim_value for x in input.type.tensor_type.shape.dim]
    output = make_tensor_value_info(context.get_unique_name(output_name), onnx_proto.TensorProto.FLOAT, output_shape)
    nb.add_output(output)
    return nb.make_node()
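# Hypothetical usage sketch: 'float_input' is a stand-in ValueInfoProto. Per
# the ai.onnx.ml Scaler spec the node computes (x - offset) * scale elementwise.
float_input = make_tensor_value_info('float_input', onnx_proto.TensorProto.FLOAT, [1, 4])
scaler_node = create_scaler(float_input, 'scaled', 2.0, 1.0, context)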
zeroth_col_name = scope.get_unique_variable_name('zeroth_col')
denominator_name = scope.get_unique_variable_name('denominator')
modified_first_col_name = scope.get_unique_variable_name('modified_first_col')
unit_float_tensor_name = scope.get_unique_variable_name('unit_float_tensor')
merged_prob_name = scope.get_unique_variable_name('merged_prob')
predicted_label_name = scope.get_unique_variable_name('predicted_label')
classes_name = scope.get_unique_variable_name('classes')
final_label_name = scope.get_unique_variable_name('final_label')
container.add_initializer(col_index_name, onnx_proto.TensorProto.INT64, [], [1])
container.add_initializer(unit_float_tensor_name, onnx_proto.TensorProto.FLOAT, [], [1.0])
container.add_initializer(denominator_name, onnx_proto.TensorProto.FLOAT, [], [100.0])
container.add_initializer(classes_name, class_type, [len(class_labels)], class_labels)
container.add_node('ArrayFeatureExtractor', [probability_tensor_name, col_index_name],
                   first_col_name, op_domain='ai.onnx.ml',
                   name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
apply_div(scope, [first_col_name, denominator_name], modified_first_col_name, container, broadcast=1)
nb = NodeBuilder(context, 'Imputer', op_domain='ai.onnx.ml')
nb.add_attribute('imputed_value_floats', sk_node.statistics_)
replaced_value = 0.0
if isinstance(sk_node.missing_values, str):
    if sk_node.missing_values == 'NaN':
        replaced_value = np.nan
elif isinstance(sk_node.missing_values, float):
    replaced_value = float(sk_node.missing_values)
else:
    raise RuntimeError('Unsupported missing value')
nb.add_attribute('replaced_value_float', replaced_value)
nb.extend_inputs(imputer_inputs)
nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, [1, num_features]))
nodes.append(nb.make_node())
return nodes
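# Hedged context sketch (not from the original source): statistics_ comes from
# a fitted scikit-learn imputer. In the modern API that is SimpleImputer, whose
# missing_values default is np.nan rather than the legacy 'NaN' string handled above.
from sklearn.impute import SimpleImputer
import numpy as np
imp = SimpleImputer(strategy='mean')
imp.fit(np.array([[1.0, 2.0], [np.nan, 4.0]]))
print(imp.statistics_)  # [1. 3.] -> these become 'imputed_value_floats'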
input_size = op.input_shape[-1]
seq_length = op.input_shape[-2]
output_seq = op.return_sequences
reverse_input = op.go_backwards
attrs = {'name': operator.full_name}
rnn_input_names = []
rnn_output_names = []
rnn_x_name = scope.get_unique_variable_name('rnn_x')
apply_transpose(scope, operator.inputs[0].full_name, rnn_x_name, container, perm=[1, 0, 2])
rnn_input_names.append(rnn_x_name)
tensor_w_name = scope.get_unique_variable_name('tensor_w')
W = op.get_weights()[0].T
container.add_initializer(tensor_w_name, onnx_proto.TensorProto.FLOAT, [1, hidden_size, input_size], W.flatten())
rnn_input_names.append(tensor_w_name)
tensor_r_name = scope.get_unique_variable_name('tensor_r')
R = op.get_weights()[1].T
container.add_initializer(tensor_r_name, onnx_proto.TensorProto.FLOAT, [1, hidden_size, hidden_size], R.flatten())
rnn_input_names.append(tensor_r_name)
if op.use_bias:
    tensor_b_name = scope.get_unique_variable_name('tensor_b')
    # ONNX RNN expects Wb and Rb concatenated; pad the Keras bias with zeros for Rb.
    B = np.concatenate([op.get_weights()[2], np.zeros(hidden_size)])
    container.add_initializer(tensor_b_name, onnx_proto.TensorProto.FLOAT, [1, 2 * hidden_size], B.flatten())
    rnn_input_names.append(tensor_b_name)
else:
    rnn_input_names.append('')
# sequence_lens cannot be derived from Keras' input_length, so it is left empty
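# Hedged continuation sketch: leave the optional sequence_lens input empty (see
# the comment above) and emit the RNN node. Output names and the exact attribute
# set are assumptions based on the ONNX RNN spec, not the original source.
rnn_input_names.append('')
rnn_y_name = scope.get_unique_variable_name('rnn_y')
rnn_h_name = scope.get_unique_variable_name('rnn_h')
rnn_output_names.extend([rnn_y_name, rnn_h_name])
attrs['direction'] = 'reverse' if reverse_input else 'forward'
attrs['hidden_size'] = hidden_size
container.add_node('RNN', rnn_input_names, rnn_output_names, **attrs)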