Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — `e` is the exception caught by an enclosing
# except clause (not visible in this chunk), presumably a KeyError/AttributeError
# from looking up the CoreML node's type — TODO confirm against full file.
raise RuntimeError('Missing type from coreml node:' + str(e))
@staticmethod
def convert(context, cm_node, inputs, outputs):
    """Convert a CoreML ArrayFeatureExtractor layer into an ONNX node.

    The extraction indices stored on the CoreML layer are attached as an
    INT64 initializer tensor named 'TargetIndex' on the resulting
    ai.onnx.ml ArrayFeatureExtractor node.
    """
    builder = NodeBuilder(context, 'ArrayFeatureExtractor', op_domain='ai.onnx.ml')
    # Indices to extract, taken straight from the CoreML layer definition.
    extract_indices = cm_node.arrayFeatureExtractor.extractIndex
    indices_initializer = model_util.make_tensor(
        'TargetIndex', onnx_proto.TensorProto.INT64,
        [len(extract_indices)], extract_indices)
    builder.extend_inputs(inputs)
    builder.add_initializer(indices_initializer)
    builder.extend_outputs(outputs)
    return builder.make_node()
# Register the class for processing
# (keys the CoreML layer field name to its converter class)
register_converter("arrayFeatureExtractor", ArrayFeatureExtractorConverter)
select_output = context.get_unique_name(output_name)
score_selector.add_output(select_output)
score_selector.add_attribute('starts', [0, 1])
score_selector.add_attribute('ends', [1, 2])
selector_output = model_util.make_tensor_value_info(select_output, onnx_proto.TensorProto.FLOAT, [1])
context.add_output(selector_output)
appended_node_zipmap = score_selector.make_node()
if appended_node_normalizer != None:
return [nb.make_node(), appended_node_normalizer, appended_node_zipmap]
else:
return [nb.make_node(), appended_node_zipmap]
# Register the class for processing
# (all sklearn linear classifiers share the GLMClassifier conversion path)
register_converter(svm.LinearSVC, GLMClassifierConverter)
register_converter(linear_model.LogisticRegression, GLMClassifierConverter)
register_converter(linear_model.SGDClassifier, GLMClassifierConverter)
Converts a CoreML DictVectorizer to ONNX
"""
nb = NodeBuilder(context, 'DictVectorizer', op_domain='ai.onnx.ml')
# CoreML stores exactly one vocabulary: either string keys or int64 keys.
# Forward whichever is present as the matching ONNX-ML attribute.
if cm_node.dictVectorizer.HasField('stringToIndex'):
nb.add_attribute('string_vocabulary', cm_node.dictVectorizer.stringToIndex.vector)
else:
nb.add_attribute('int64_vocabulary', cm_node.dictVectorizer.int64ToIndex.vector)
nb.extend_inputs(inputs)
nb.extend_outputs(outputs)
return nb.make_node()
# Register the class for processing
# (keys the CoreML layer field name to its converter class)
register_converter('dictVectorizer', DictVectorizerConverter)
raise ValueError('Invalid or missing input type for GLMRegressor.')
output_dim = None
try:
if len(inputs[0].type.tensor_type.shape.dim) > 0:
output_dim = [1, len(intercepts)]
except AttributeError as e:
raise ValueError('Invalid or missing input dimension for GLMRegressor.')
nb.add_output(model_util.make_tensor_value_info(nb.name, output_type, output_dim))
return nb.make_node()
# Register the class for processing
# (all sklearn linear regressors share the GLMRegressor conversion path)
register_converter(svm.LinearSVR, GLMRegressorConverter)
register_converter(linear_model.LinearRegression, GLMRegressorConverter)
register_converter(linear_model.Ridge, GLMRegressorConverter)
register_converter(linear_model.SGDRegressor, GLMRegressorConverter)
# NOTE(review): fragment — `nb`, `context`, `cm_node`, and `class_labels`
# are bound earlier in the enclosing convert() (not visible in this chunk).
probability_tensor_name = context.get_unique_name('probability_tensor')
nb.add_output(probability_tensor_name)
nodes = [nb.make_node()]
# Only wire up the probability dictionary when the CoreML model declares one.
if cm_node.description.predictedProbabilitiesName != '':
# Find the corresponding ONNX name for CoreML's probability output (a dictionary)
predicted_probability_name = context.get_onnx_name(cm_node.description.predictedProbabilitiesName)
# Create a ZipMap to connect probability tensor and probability dictionary
nodes.append(model_util.make_zipmap_node(context, probability_tensor_name,
predicted_probability_name, class_labels))
return nodes
# Register the class for processing
register_converter("supportVectorClassifier", SupportVectorClassifierConverter)
Converts a CoreML Normalizer to ONNX
"""
norms = ['MAX', 'L1', 'L2']
nb = NodeBuilder(context, 'Normalizer', op_domain='ai.onnx.ml')
if cm_node.normalizer.normType in range(3):
nb.add_attribute('norm', norms[cm_node.normalizer.normType])
else:
raise RuntimeError('Invalid norm type: ' + cm_node.normalizer.normType)
nb.extend_inputs(inputs)
nb.extend_outputs(outputs)
return nb.make_node()
# Register the class for processing
# (keys the CoreML layer field name to its converter class)
register_converter('normalizer', NormalizerConverter)
nb.add_attribute('post_transform', "NONE")
output_dim = None
try:
if len(inputs[0].type.tensor_type.shape.dim) > 0:
output_dim = [1, inputs[0].type.tensor_type.shape.dim[0].dim_value]
except AttributeError as e:
raise ValueError('Invalid or missing input dimension.')
nb.add_attribute('n_supports', len(sk_node.support_))
nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, output_dim))
return nb.make_node()
# Register the class for processing
# (NuSVC/NuSVR reuse the SVC/SVR converters)
register_converter(sklearn.svm.SVC, SVCConverter)
register_converter(sklearn.svm.SVR, SVRConverter)
register_converter(sklearn.svm.NuSVC, SVCConverter)
register_converter(sklearn.svm.NuSVR, SVRConverter)
# NOTE(review): fragment — `attr_pairs`, `nb`, and `inputs` are built
# earlier in the enclosing tree-ensemble convert() (not visible here).
for k, v in attr_pairs.items():
nb.add_attribute(k, v)
nb.extend_inputs(inputs)
# Single-row, single-value output for the (regressor) prediction.
output_dim = [1, 1]
nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, output_dim))
return nb.make_node()
# Register each sklearn tree-ensemble model class with its converter.
register_converter(DecisionTreeClassifier, DecisionTreeClassifierConverter)
register_converter(DecisionTreeRegressor, DecisionTreeRegressorConverter)
register_converter(RandomForestClassifier, RandomForestClassifierConverter)
register_converter(RandomForestRegressor, RandomForestRegressorConverter)
register_converter(GradientBoostingClassifier, GradientBoostingClassifierConverter)
register_converter(GradientBoostingRegressor, GradientBoostingRegressorConverter)
nb = SVMConverter.convert(context, sk_node, inputs, "SVMRegressor")
nb.add_attribute('post_transform', "NONE")
output_dim = None
try:
if len(inputs[0].type.tensor_type.shape.dim) > 0:
output_dim = [1, inputs[0].type.tensor_type.shape.dim[0].dim_value]
except AttributeError as e:
raise ValueError('Invalid or missing input dimension.')
nb.add_attribute('n_supports', len(sk_node.support_))
nb.add_output(model_util.make_tensor_value_info(nb.name, onnx_proto.TensorProto.FLOAT, output_dim))
return nb.make_node()
# Register the class for processing
# NOTE(review): duplicate of an identical registration run earlier in this
# file — likely extraction/chunk overlap; harmless but worth deduplicating.
register_converter(sklearn.svm.SVC, SVCConverter)
register_converter(sklearn.svm.SVR, SVRConverter)
register_converter(sklearn.svm.NuSVC, SVCConverter)
register_converter(sklearn.svm.NuSVR, SVRConverter)
# NOTE(review): fragment — the SVR parameters (`svr_kernel`,
# `svr_kernel_params`, `support_vectors`, `n_supports`, `svr_coefficients`,
# `svr_rho`) are extracted from the CoreML model earlier in the enclosing
# convert() (not visible in this chunk).
nb = NodeBuilder(context, 'SVMRegressor', op_domain='ai.onnx.ml')
nb.add_attribute('kernel_type', svr_kernel)
nb.add_attribute('kernel_params', svr_kernel_params)
nb.add_attribute('support_vectors', support_vectors)
nb.add_attribute('n_supports', n_supports)
nb.add_attribute('coefficients', svr_coefficients)
nb.add_attribute('rho', svr_rho)
nb.extend_inputs(inputs)
nb.extend_outputs(outputs)
return nb.make_node()
# Register the class for processing
register_converter("supportVectorRegressor", SupportVectorRegressorConverter)