# Allocate intermediate tensor names for masking rows whose summed
# probability is zero (avoids a division by zero when normalising).
cast_prob_name = scope.get_unique_variable_name('cast_prob')
bool_not_cast_prob_name = scope.get_unique_variable_name(
    'bool_not_cast_prob')
mask_name = scope.get_unique_variable_name('mask')
masked_concatenated_prob_name = scope.get_unique_variable_name(
    'masked_concatenated_prob')
n_classes_name = scope.get_unique_variable_name('n_classes')
reduced_prob_mask_name = scope.get_unique_variable_name(
    'reduced_prob_mask')
masked_reduced_prob_name = scope.get_unique_variable_name(
    'masked_reduced_prob')

container.add_initializer(n_classes_name, onnx_proto.TensorProto.FLOAT,
                          [], [n_classes])

# mask = 1.0 where the reduced (summed) probability is zero, 0.0 elsewhere:
# Cast(float -> bool) -> Not -> Cast(bool -> float).
apply_cast(scope, reduced_prob_name, cast_prob_name, container,
           to=onnx_proto.TensorProto.BOOL)
container.add_node('Not', cast_prob_name,
                   bool_not_cast_prob_name,
                   name=scope.get_unique_operator_name('Not'))
apply_cast(scope, bool_not_cast_prob_name, mask_name, container,
           to=onnx_proto.TensorProto.FLOAT)

# For masked rows, add 1 to every class probability and n_classes to the
# denominator so the subsequent division yields a uniform distribution.
apply_add(scope, [concatenated_prob_name, mask_name],
          masked_concatenated_prob_name, container, broadcast=1)
apply_mul(scope, [mask_name, n_classes_name], reduced_prob_mask_name,
          container, broadcast=1)
apply_add(scope, [reduced_prob_name, reduced_prob_mask_name],
          masked_reduced_prob_name, container, broadcast=0)
return masked_concatenated_prob_name, masked_reduced_prob_name
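# For reference, a minimal NumPy sketch of the same masking arithmetic.
# The names below (`concatenated_prob`, `reduced_prob`, `n_classes`) are
# illustrative and assume a plain 2-D probability matrix, not the
# converter's actual tensor shapes.

import numpy as np

concatenated_prob = np.array([[0.2, 0.8], [0.0, 0.0]])
reduced_prob = concatenated_prob.sum(axis=1, keepdims=True)
n_classes = concatenated_prob.shape[1]

mask = (reduced_prob == 0).astype(np.float64)      # 1.0 on all-zero rows
masked_concatenated_prob = concatenated_prob + mask
masked_reduced_prob = reduced_prob + mask * n_classes

# Dividing now yields [[0.2, 0.8], [0.5, 0.5]] instead of NaN.
print(masked_concatenated_prob / masked_reduced_prob)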
apply_reshape(
    scope, array_feat_extractor_output_name, reshaped_weights_name,
    container, desired_shape=(-1, len(op.estimators_)))
# Cumulative sum of the sorted estimator weights; the last column holds
# the total weight for each sample.
weights_cdf_name = cum_sum(
    scope, container, reshaped_weights_name,
    len(op.estimators_))
container.add_node(
    'ArrayFeatureExtractor', [weights_cdf_name, last_index_name],
    median_value_name, op_domain='ai.onnx.ml',
    name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
# comp_value = 0.5 * total weight.
apply_mul(scope, [median_value_name, half_scalar_name],
          comp_value_name, container, broadcast=1)
# 'Less' is True while the cumulative weight stays below half of the
# total; ArgMin on the float cast then returns the first index where the
# cumulative weight reaches half, i.e. the weighted-median position.
container.add_node(
    'Less', [weights_cdf_name, comp_value_name],
    median_or_above_name,
    name=scope.get_unique_operator_name('Less'))
apply_cast(scope, median_or_above_name, cast_result_name,
           container, to=container.proto_dtype)
container.add_node('ArgMin', cast_result_name,
                   median_idx_name,
                   name=scope.get_unique_operator_name('ArgMin'), axis=1)
# Map the median position back to the estimator index, then pick that
# estimator's prediction as the final output.
_apply_gather_elements(
    scope, container, [sorted_indices_name, median_idx_name],
    median_estimators_name, axis=1, dim=len(op.estimators_),
    zero_type=onnx_proto.TensorProto.INT64, suffix="A")
output_name = operator.output_full_names[0]
_apply_gather_elements(
    scope, container, [concatenated_labels, median_estimators_name],
    output_name, axis=1, dim=len(op.estimators_),
    zero_type=onnx_proto.TensorProto.FLOAT, suffix="B")
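# A rough NumPy sketch of the weighted-median selection built above.
# `preds` and `weights` are hypothetical per-sample arrays (one column per
# estimator, predictions already sorted), not names from the converter.

import numpy as np

preds = np.array([[1.0, 3.0, 10.0]])      # sorted estimator predictions
weights = np.array([[0.2, 0.5, 0.3]])     # weights in the same order

cdf = np.cumsum(weights, axis=1)           # cumulative weight per sample
half_total = 0.5 * cdf[:, -1:]             # half of the total weight
below_half = (cdf < half_total).astype(np.float64)
median_idx = np.argmin(below_half, axis=1)           # first index with cdf >= half
print(preds[np.arange(len(preds)), median_idx])      # -> [3.]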
to=onnx_proto.TensorProto.INT64)
name = cast_feature
# One-hot encode the (INT64-cast) feature column.
container.add_node('OneHotEncoder', name,
                   ohe_output, op_domain='ai.onnx.ml',
                   **attrs)
categories_len += len(categories)

# Concatenate every encoded column, cast back to INT64 when the encoder's
# dtype is a signed integer, and flatten to (n_samples, total_categories).
concat_result_name = scope.get_unique_variable_name('concat_result')
apply_concat(scope, result, concat_result_name, container, axis=2)
reshape_input = concat_result_name
if np.issubdtype(ohe_op.dtype, np.signedinteger):
    reshape_input = scope.get_unique_variable_name('cast')
    apply_cast(scope, concat_result_name, reshape_input,
               container, to=onnx_proto.TensorProto.INT64)
apply_reshape(scope, reshape_input, operator.output_full_names,
              container, desired_shape=(-1, categories_len))
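# Roughly what this subgraph computes, sketched in NumPy for two integer
# features with 2 and 3 categories (the values below are illustrative only).

import numpy as np

x = np.array([[0, 2], [1, 0]])                        # (n_samples, n_features)
categories = [np.array([0, 1]), np.array([0, 1, 2])]

encoded = [(x[:, [i]] == cats).astype(np.int64)       # one-hot per feature
           for i, cats in enumerate(categories)]
print(np.concatenate(encoded, axis=1))                # concat + flatten
# [[1 0 0 0 1]
#  [0 1 1 0 0]]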
equal_result_name = scope.get_unique_variable_name('equal_result')
cast_output_name = scope.get_unique_variable_name('cast_output')
reduced_proba_name = scope.get_unique_variable_name('reduced_proba')

container.add_initializer(
    n_estimators_name, onnx_proto.TensorProto.FLOAT, [],
    [len(model.estimators_)])
container.add_initializer(
    class_labels_name, onnx_proto.TensorProto.INT64,
    [1, 1, len(model.estimators_[0].classes_)],
    model.estimators_[0].classes_)

# Compare each estimator's predicted label against every class label,
# count the matches across estimators and divide by the number of
# estimators to obtain the vote fractions.
container.add_node('Equal', [class_labels_name, merged_proba_name],
                   equal_result_name,
                   name=scope.get_unique_operator_name('Equal'))
apply_cast(scope, equal_result_name, cast_output_name,
           container, to=onnx_proto.TensorProto.FLOAT)
container.add_node('ReduceSum', cast_output_name,
                   reduced_proba_name,
                   name=scope.get_unique_operator_name('ReduceSum'),
                   axes=[0], keepdims=0)
apply_div(scope, [reduced_proba_name, n_estimators_name],
          final_proba_name, container, broadcast=1)
return final_proba_name
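# Equivalent vote counting in NumPy (hypothetical data: 3 estimators,
# 2 samples, classes [0, 1, 2]; none of these names come from the code above).

import numpy as np

classes = np.array([0, 1, 2]).reshape(1, 1, -1)
predictions = np.array([[[0], [2]],        # estimator 0
                        [[1], [2]],        # estimator 1
                        [[0], [2]]])       # estimator 2

votes = (classes == predictions).astype(np.float64)    # Equal + Cast
counts = votes.sum(axis=0)                              # ReduceSum over axis 0
proba = counts / predictions.shape[0]                   # Div by n_estimators
print(proba)   # ~[[0.67, 0.33, 0.], [0., 0., 1.]]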
# ArgMax over the class probabilities gives the index of the predicted
# class; ArrayFeatureExtractor maps that index back to the class label.
container.add_node('ArgMax', class_prob_name,
                   argmax_output_name,
                   name=scope.get_unique_operator_name('ArgMax'), axis=1)
container.add_node(
    'ArrayFeatureExtractor', [classes_name, argmax_output_name],
    array_feature_extractor_result_name, op_domain='ai.onnx.ml',
    name=scope.get_unique_operator_name('ArrayFeatureExtractor'))

if class_type == onnx_proto.TensorProto.INT32:
    # Integer labels are reshaped to 1-D and cast to INT64, the type
    # expected for the label output.
    reshaped_result_name = scope.get_unique_variable_name(
        'reshaped_result')
    apply_reshape(scope, array_feature_extractor_result_name,
                  reshaped_result_name, container,
                  desired_shape=(-1,))
    apply_cast(scope, reshaped_result_name, operator.outputs[0].full_name,
               container, to=onnx_proto.TensorProto.INT64)
else:
    apply_reshape(scope, array_feature_extractor_result_name,
                  operator.outputs[0].full_name, container,
                  desired_shape=(-1,))
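# The same label lookup in NumPy (illustrative labels and probabilities,
# not taken from the converter above).

import numpy as np

classes = np.array([3, 7, 11])
class_prob = np.array([[0.1, 0.7, 0.2],
                       [0.6, 0.3, 0.1]])

label_idx = np.argmax(class_prob, axis=1)   # ArgMax over axis 1
labels = classes[label_idx]                 # ArrayFeatureExtractor lookup
print(labels.reshape(-1))                   # -> [7 3]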
# Same zero-probability masking as above: rows whose summed probability is
# zero end up with a uniform distribution after the final division.
not_reduced_proba_name = scope.get_unique_variable_name(
    'not_reduced_proba')
proba_updated_name = scope.get_unique_variable_name('proba_updated')
mask_name = scope.get_unique_variable_name('mask')
reduced_proba_updated_name = scope.get_unique_variable_name(
    'reduced_proba_updated')

container.add_initializer(num_classes_name, onnx_proto.TensorProto.FLOAT,
                          [], [num_classes])

# not_reduced_proba = 1.0 where the summed probability is zero.
apply_cast(scope, reduced_proba, bool_reduced_proba_name, container,
           to=onnx_proto.TensorProto.BOOL)
container.add_node('Not', bool_reduced_proba_name,
                   bool_not_reduced_proba_name,
                   name=scope.get_unique_operator_name('Not'))
apply_cast(scope, bool_not_reduced_proba_name, not_reduced_proba_name,
           container, to=onnx_proto.TensorProto.FLOAT)

# Bump the masked rows' probabilities by 1 and their denominator by
# num_classes before the division.
apply_add(scope, [proba, not_reduced_proba_name],
          proba_updated_name, container, broadcast=1)
apply_mul(scope, [not_reduced_proba_name, num_classes_name],
          mask_name, container, broadcast=1)
apply_add(scope, [reduced_proba, mask_name],
          reduced_proba_updated_name, container, broadcast=0)
return proba_updated_name, reduced_proba_updated_name
# Look up the predicted label from the ArgMax index.
container.add_node(
    'ArrayFeatureExtractor', [classes_name, argmax_output_name],
    array_feature_extractor_result_name, op_domain='ai.onnx.ml',
    name=scope.get_unique_operator_name('ArrayFeatureExtractor'))

output_shape = (-1,)
if class_type == onnx_proto.TensorProto.INT32:
    # Integer labels: cast to FLOAT, reshape to 1-D, then cast to INT64
    # for the label output.
    cast2_result_name = scope.get_unique_variable_name('cast2_result')
    reshaped_result_name = scope.get_unique_variable_name(
        'reshaped_result')
    apply_cast(scope, array_feature_extractor_result_name,
               cast2_result_name, container,
               to=onnx_proto.TensorProto.FLOAT)
    apply_reshape(scope, cast2_result_name, reshaped_result_name,
                  container, desired_shape=output_shape)
    apply_cast(scope, reshaped_result_name, output_full_name, container,
               to=onnx_proto.TensorProto.INT64)
else:  # string labels
    apply_reshape(scope, array_feature_extractor_result_name,
                  output_full_name, container, desired_shape=output_shape)