# We declare the names used to store ONNX GRU's outputs. They are intermediate
# tensors and we will adjust them subsequently to mimic Keras output format.
gru_y_name = scope.get_unique_variable_name('gru_y')
gru_h_name = scope.get_unique_variable_name('gru_h')
gru_output_names = [gru_y_name, gru_h_name]
container.add_node(op_type, gru_input_names, gru_output_names, op_version=op_version, **attrs)
# Create output-adjusting operators
if output_seq:
    intermediate_result_name = scope.get_unique_variable_name('intermediate_result')
    apply_transpose(scope, gru_y_name, intermediate_result_name, container, perm=[1, 0, 2])
    apply_reshape(scope, intermediate_result_name, operator.outputs[0].full_name, container,
                  desired_shape=[-1, seq_length, hidden_size])
else:
    # Here we ignore ONNX GRU's first output because it's useless.
    intermediate_result_name = scope.get_unique_variable_name('intermediate_result')
    apply_transpose(scope, gru_h_name, intermediate_result_name, container, perm=[1, 0, 2])
    apply_reshape(scope, intermediate_result_name, operator.outputs[0].full_name, container,
                  desired_shape=[-1, hidden_size])
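# Illustration (not part of the converter): the Keras output shapes the adjustments above
# are meant to reproduce; values are irrelevant, only shapes matter.
#
#   import numpy as np
#   from keras.layers import GRU, Input
#   from keras.models import Model
#   inp = Input(shape=(5, 3))                          # (batch, seq_length=5, input_dim=3)
#   seq_model = Model(inp, GRU(4, return_sequences=True)(inp))
#   seq_model.predict(np.zeros((2, 5, 3))).shape       # (2, 5, 4) -> [-1, seq_length, hidden_size]
#   last_model = Model(inp, GRU(4)(inp))
#   last_model.predict(np.zeros((2, 5, 3))).shape      # (2, 4)    -> [-1, hidden_size]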
op = operator.raw_operator
# Derive permutation configuration. If the Keras input format is not channels_first, this configuration may be used
# to manipulate the input and output of ONNX Pad.
input_perm_axes, output_perm_axes = get_permutation_config(n_dims)
channels_first = n_dims > 1 and op.data_format == 'channels_first'
# Before creating the main Pad operator, we need to permute the input tensor if the original operator is
# working under channels_last mode.
if channels_first:
    # No permutation is required. Use input as it is.
    input_tensor_name = operator.inputs[0].full_name
else:
    # Permute the original input and then use the permuted result as the input of ONNX Pad
    input_tensor_name = scope.get_unique_variable_name(operator.inputs[0].full_name + '_permuted')
    apply_transpose(scope, operator.inputs[0].full_name, input_tensor_name, container, perm=input_perm_axes)
# Prepare attributes for ONNX Pad
mode = 'constant'
pads = get_padding_config(op, n_dims)
# If channels_first is True, we don't need to permute the output of ONNX Pad. Otherwise, similar to Crop's
# conversion, a Transpose would be added.
if channels_first:
    apply_pad(scope, input_tensor_name, operator.outputs[0].full_name, container, mode=mode, pads=pads, value=0.)
else:
    intermediate_tensor_name = scope.get_unique_variable_name(input_tensor_name + '_padded')
    apply_pad(scope, input_tensor_name, intermediate_tensor_name, container, mode=mode, pads=pads, value=0.)
    apply_transpose(scope, intermediate_tensor_name, operator.outputs[0].full_name, container,
                    perm=output_perm_axes)
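# Illustration (not part of the converter): ONNX Pad lists every axis' begin amount first,
# then every axis' end amount. For a 4-D NCHW input, Keras ZeroPadding2D(((1, 1), (2, 2)))
# would therefore have to map to:
#   pads = [0, 0, 1, 2, 0, 0, 1, 2]  # N/C/H/W begins, then N/C/H/W ends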
def convert_keras_permute(scope, operator, container):
    axes = [0] + list(operator.raw_operator.dims)
    apply_transpose(scope, operator.inputs[0].full_name, operator.outputs[0].full_name, container, perm=axes)
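# Illustration (not part of the converter): Keras Permute uses 1-based indices over the
# non-batch axes, e.g. Permute((2, 1)) applied to a (batch, a, b) tensor swaps the last
# two axes. Prepending 0 pins the batch axis, giving ONNX Transpose perm=[0, 2, 1].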
op_version = 7
# We declare some names to store the outputs produced by ONNX LSTM. Then, create ONNX LSTM. Subsequently, its
# outputs may be adjusted to match Keras format.
lstm_output_names = []
lstm_y_name = scope.get_unique_variable_name('lstm_y')
lstm_output_names.append(lstm_y_name)
lstm_h_name = scope.get_unique_variable_name('lstm_h')
lstm_output_names.append(lstm_h_name)
lstm_c_name = scope.get_unique_variable_name('lstm_c')
lstm_output_names.append(lstm_c_name)
container.add_node('LSTM', lstm_input_names, lstm_output_names, op_version=op_version, **lstm_attrs)
# Create output-adjusting operators
if output_seq:
    lstm_y_name_transposed = scope.get_unique_variable_name('lstm_y_transposed')
    apply_transpose(scope, lstm_y_name, lstm_y_name_transposed, container, perm=[1, 0, 2])
    apply_reshape(scope, lstm_y_name_transposed, operator.outputs[0].full_name, container,
                  desired_shape=[-1, seq_length, hidden_size])
else:
    apply_reshape(scope, lstm_h_name, operator.outputs[0].full_name, container, desired_shape=[-1, hidden_size])
if output_state:
    # state_h
    apply_reshape(scope, lstm_h_name, operator.outputs[1].full_name, container, desired_shape=[-1, hidden_size])
    # state_c
    apply_reshape(scope, lstm_c_name, operator.outputs[2].full_name, container, desired_shape=[-1, hidden_size])
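# Illustration (not part of the converter): ONNX LSTM emits Y (hidden states for every
# step), Y_h (last hidden state) and Y_c (last cell state). A Keras LSTM built with
# return_state=True returns [output, state_h, state_c], which is why lstm_h_name feeds
# operator.outputs[1] and lstm_c_name feeds operator.outputs[2] above.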
def convert_keras_conv_core(scope, operator, container, is_transpose, n_dims, input_perm_axes,
                            output_perm_axes, weight_perm_axes):
    op = operator.raw_operator
    is_separable_conv = isinstance(op, SeparableConv2D) or \
        (StrictVersion(keras.__version__) >= StrictVersion('2.1.3') and isinstance(op, SeparableConv1D))
    channels_first = n_dims > 1 and op.data_format == 'channels_first'
    # Unless channels_first is the Keras data format, the input and weight layouts in Keras and ONNX
    # are reversed. This is annoying and inefficient, because we have to insert transposes.
    if channels_first:
        adjusted_input_name = operator.inputs[0].full_name
    else:
        adjusted_input_name = scope.get_unique_variable_name('adjusted_input')
        apply_transpose(scope, operator.inputs[0].full_name, adjusted_input_name, container, perm=input_perm_axes)
    op_type = 'ConvTranspose' if is_transpose else 'Conv'
    convolution_input_names = [adjusted_input_name]
    parameters = op.get_weights()
    if is_separable_conv:
        attrs = {'name': operator.full_name + '0'}
        assert len(parameters) == (3 if op.use_bias else 2)
    else:
        attrs = {'name': operator.full_name}
        assert len(parameters) == (2 if op.use_bias else 1)
    weight_params = parameters[0]
    input_channels, output_channels = weight_params.shape[-2:]
    kernel_size = weight_params.shape[:-2]
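# Illustration (not part of the converter): for a Keras Conv2D layer, weight_params.shape
# is (kernel_h, kernel_w, input_channels, output_channels), so shape[-2:] recovers the
# channel counts and shape[:-2] the spatial kernel size.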
if channels_first:
    # No permutation is required. Use input as it is.
    input_tensor_name = operator.inputs[0].full_name
else:
    # Permute the original input and then use the permuted result as the input of ONNX Upsample
    input_tensor_name = scope.get_unique_variable_name(operator.inputs[0].full_name + '_permuted')
    apply_transpose(scope, operator.inputs[0].full_name, input_tensor_name, container, perm=input_perm_axes)
# If channels_first is True, we don't need to permute the output of ONNX Upsample. Otherwise, similar to Crop's
# conversion, a Transpose would be added.
if channels_first:
    apply_upsample(scope, input_tensor_name, operator.outputs[0].full_name, container, scales=scales)
else:
    upsampled_tensor_name = scope.get_unique_variable_name(input_tensor_name + '_upsampled')
    apply_upsample(scope, input_tensor_name, upsampled_tensor_name, container, scales=scales)
    apply_transpose(scope, upsampled_tensor_name, operator.outputs[0].full_name, container, perm=output_perm_axes)
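# Illustration (not part of the converter): ONNX Upsample expects one scale per input axis
# in NCHW order, so Keras UpSampling2D(size=(2, 3)) would correspond to
#   scales = [1.0, 1.0, 2.0, 3.0]  # N and C untouched; H doubled, W tripled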
    intermediate_output_name = scope.get_unique_variable_name('convolution_output')
    container.add_node(op_type, convolution_input_names,
                       intermediate_output_name, **attrs)
    if is_separable_conv:
        intermediate_output_name = process_separable_conv_2nd(scope, operator, container,
                                                              [intermediate_output_name], n_dims,
                                                              weight_perm_axes, parameters, attrs['auto_pad'])
    # The construction of convolution is done. Now, we create an activation operator to apply the activation
    # specified in this Keras layer.
    apply_activation_function = _activation_map[op.activation]
    activation_output_name = scope.get_unique_variable_name('activation_output')
    apply_activation_function(scope, intermediate_output_name, activation_output_name, container)
    # Permute the output back to its original format
    if not channels_first:
        # Generate a final transposer.
        apply_transpose(scope, activation_output_name, operator.outputs[0].full_name, container, perm=output_perm_axes)
    else:
        apply_identity(scope, activation_output_name, operator.outputs[0].full_name, container)
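# Illustration (not part of the converter): the permutations this converter assumes for the
# 2-D, channels_last case. get_permutation_config(2) would have to produce
#   input_perm_axes  = [0, 3, 1, 2]   # NHWC -> NCHW for ONNX Conv
#   output_perm_axes = [0, 2, 3, 1]   # NCHW -> NHWC, back to the Keras layout
# while weight_perm_axes = [3, 2, 0, 1] maps a Keras kernel of shape
# (kernel_h, kernel_w, input_channels, output_channels) to ONNX's (M, C, kH, kW).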