How to use the onnxruntime.InferenceSession function in onnxruntime

To help you get started, we’ve selected a few onnxruntime examples based on popular ways it is used in public projects.
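Every example below follows the same basic pattern: create an InferenceSession from a model file or from serialized bytes, look up the input names, and call run with a feed dictionary. A minimal sketch (the model path and the input shape are placeholders):

import numpy as np
import onnxruntime as ort

# Create a session from an .onnx file (a serialized byte string works too).
sess = ort.InferenceSession("model.onnx")
input_name = sess.get_inputs()[0].name
x = np.random.rand(1, 3).astype(np.float32)  # shape must match the model
# Passing None as the first argument returns every model output.
outputs = sess.run(None, {input_name: x})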


github onnx / sklearn-onnx / tests / test_sklearn_gaussian_process.py
def test_kernel_rbf1(self):
    # Convert an RBF kernel to ONNX and compare onnxruntime's output
    # with scikit-learn's (Xtest_ is test data defined elsewhere in
    # the test module).
    ker = RBF(length_scale=1, length_scale_bounds=(1e-3, 1e3))
    onx = convert_kernel(ker, 'X', output_names=['Y'], dtype=np.float32,
                         op_version=onnx_opset_version())
    model_onnx = onx.to_onnx(
        inputs=[('X', FloatTensorType([None, None]))])
    sess = InferenceSession(model_onnx.SerializeToString())
    res = sess.run(None, {'X': Xtest_.astype(np.float32)})[0]
    m1 = res
    m2 = ker(Xtest_)
    assert_almost_equal(m1, m2, decimal=5)
github onnx / onnxmltools / tests / end2end / test_keras_converter.py
model = Sequential()
model.add(Conv2D(2, kernel_size=(1, 2), strides=(1, 1), padding='valid',
                 input_shape=(H, W, C), data_format='channels_last'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), data_format='channels_last'))

model.compile(optimizer='sgd', loss='mse')
converted_model = onnxmltools.convert_keras(
    model, channel_first_inputs=[model.inputs[0].name])

expected = model.predict(x)
self.assertIsNotNone(expected)
self.assertIsNotNone(converted_model)

try:
    import onnxruntime
    sess = onnxruntime.InferenceSession(converted_model.SerializeToString())
    # The converted model expects channels-first input,
    # so transpose x from NHWC to NCHW before feeding it.
    actual = sess.run([], {sess.get_inputs()[0].name:
                           np.transpose(x.astype(np.float32), [0, 3, 1, 2])})
    self.assertTrue(np.allclose(expected, actual, rtol=1.e-3))
except ImportError:
    pass
github onnx / sklearn-onnx / tests / test_algebra_onnx_operators.py
    # Tail of the custom converter "conv" defined above this excerpt.
    assert nno[0].output_names == ['variable']
    assert len(nva) == 1
    assert isinstance(nva[0], tuple)
    assert nva[0][1] == 0

def shape(operator):
    # Custom shape calculator: the output has N rows and as many
    # columns as W has rows.
    N = operator.inputs[0].type.shape[0]
    W = operator.raw_operator.W
    operator.outputs[0].type.shape = [N, W.shape[0]]

model_onnx = convert_sklearn(
    tr, 'a-sub', [('input', FloatTensorType([None, 2]))],
    custom_shape_calculators={CustomOpTransformer: shape},
    custom_conversion_functions={CustomOpTransformer: conv})

sess = InferenceSession(model_onnx.SerializeToString())
z2 = sess.run(None, {'input': mat.astype(np.float32)})[0]
assert_almost_equal(z, z2)
github axinc-ai / ailia-models / council-GAN / council-gan-glasses.py
def process_video():
    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    if args.onnx:
        net = onnxruntime.InferenceSession('councilGAN-glasses.onnx')
    else:
        net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    if args.face_recognition:
        locator = FaceLocator()
    else:
        locator = None
        
    if args.video == '0':
        print('[INFO] Webcam mode is activated')
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():
            print("[ERROR] webcamera not found")
            sys.exit(1)
    else:
        if check_file_existance(args.video):
github onnx / sklearn-onnx / docs / examples / plot_tfidfvectorizer.py
# ("seps" is a dictionary of tokenizer options defined above this excerpt)
model_onnx = convert_sklearn(pipeline, "tfidf",
                             initial_types=[
                                 ("input", StringTensorType([None, 2]))],
                             options=seps)

#################################
# And save.
with open("pipeline_tfidf.onnx", "wb") as f:
    f.write(model_onnx.SerializeToString())

##########################
# Predictions with onnxruntime.

sess = rt.InferenceSession("pipeline_tfidf.onnx")
print('---', train_data[0])
inputs = {'input': train_data[:1]}
pred_onx = sess.run(None, inputs)
print("predict", pred_onx[0])
print("predict_proba", pred_onx[1])

############################
# With *scikit-learn*:
print(pipeline.predict(train_data[:1]))
print(pipeline.predict_proba(train_data[:1]))

###############################
# There are discrepancies for this model because
# the tokenization is not exactly the same.
# This is a work in progress.
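####################################
# One rough way to quantify the discrepancy (assumption: with skl2onnx's
# default zipmap behaviour, pred_onx[1] is a list of
# {label: probability} dictionaries):
import numpy as np
skl_proba = pipeline.predict_proba(train_data[:1])
onx_proba = np.array([[p[c] for c in pipeline.classes_]
                      for p in pred_onx[1]])
print("max abs diff:", np.abs(skl_proba - onx_proba).max())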
github microsoft / OLive / docker-images / onnx-converter / src / create_input.py
if not os.path.exists(test_path):
    os.mkdir(test_path)
    os.chmod(test_path, 0o644)
# Check if the test folder and test data already exist
regex = re.compile("test_data*")
for f in os.listdir(output_test_data_dir):
    if regex.match(f):
        user_data_path = os.path.join(output_test_data_dir, f)
        for input_file in os.listdir(user_data_path):
            if input_file.endswith('.pb'):
                print("Test data .pb files already exist. Skipping dummy input generation.")
                return test_path

# Get input names from the converted model
sess = rt.InferenceSession(output_model_path)

#########################
# Let's see the input names and shapes.
print("%s inputs: " % output_model_path)
inputs = sess.get_inputs()
for i in range(len(inputs)):
    print("input name: %s, shape: %s, type: %s"
          % (inputs[i].name, inputs[i].shape, inputs[i].type))
    # If the input has None dimensions, replace them with 1
    shape_corrected = [1 if x is None else x for x in inputs[i].shape]
    if inputs[i].type == "tensor(string)" or not all(isinstance(dim, int) for dim in shape_corrected):
        shutil.rmtree(test_path)
        raise ValueError(
            "Cannot auto-generate inputs. Please provide your own input .pb files under the output_onnx_path folder.")
    # Create a random input and write it to a .pb file
    create_tensor("input_%s.pb" % i, shape_corrected, inputs[i].name, test_path, TYPE_MAP.get(inputs[i].type))
github onnx / sklearn-onnx / docs / examples / plot_onnx_operators.py
def predict_with_onnxruntime(model_def, *inputs):
    import onnxruntime as ort
    sess = ort.InferenceSession(model_def.SerializeToString())
    # Feed the inputs by position, matching the model's declared input names.
    names = [i.name for i in sess.get_inputs()]
    dinputs = dict(zip(names, inputs))
    res = sess.run(None, dinputs)
    # Return the outputs keyed by their names.
    names = [o.name for o in sess.get_outputs()]
    return dict(zip(names, res))
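As a usage sketch, here is a hand-built single-node model fed through the helper above (the graph name, tensor names, and pinned opset are arbitrary choices):

import numpy as np
from onnx import helper, TensorProto

# A one-node graph computing C = A + B on two float vectors.
node = helper.make_node('Add', ['A', 'B'], ['C'])
graph = helper.make_graph(
    [node], 'add_graph',
    [helper.make_tensor_value_info('A', TensorProto.FLOAT, [2]),
     helper.make_tensor_value_info('B', TensorProto.FLOAT, [2])],
    [helper.make_tensor_value_info('C', TensorProto.FLOAT, [2])])
model_def = helper.make_model(
    graph, opset_imports=[helper.make_opsetid('', 13)])

print(predict_with_onnxruntime(
    model_def,
    np.array([1.0, 2.0], dtype=np.float32),
    np.array([3.0, 4.0], dtype=np.float32)))
# {'C': array([4., 6.], dtype=float32)}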
github microsoft / onnxruntime / docs / python / examples / plot_profiling.py
example1 = get_example("mul_1.onnx")
sess = rt.InferenceSession(example1)
input_name = sess.get_inputs()[0].name

x = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=numpy.float32)
res = sess.run(None, {input_name: x})
print(res)

#########################
# We need to enable profiling
# before running the predictions.

options = rt.SessionOptions()
options.enable_profiling = True
sess_profile = rt.InferenceSession(example1, options)
input_name = sess_profile.get_inputs()[0].name

x = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=numpy.float32)

# Run on the profiling-enabled session so the events get recorded.
sess_profile.run(None, {input_name: x})
prof_file = sess_profile.end_profiling()
print(prof_file)

###########################
# The results are stored in a file in JSON format.
# Let's see what it contains.
import json
with open(prof_file, "r") as f:
    sess_time = json.load(f)
import pprint
pprint.pprint(sess_time)
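###########################
# The raw event list is verbose. A short sketch that aggregates the
# time spent per operator (assumption: the events follow the Chrome
# trace format, with "name" and "dur" microsecond fields):
from collections import defaultdict
durations = defaultdict(int)
for event in sess_time:
    if "dur" in event:
        durations[event["name"]] += event["dur"]
for name, dur in sorted(durations.items(), key=lambda kv: -kv[1]):
    print(name, dur, "us")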
github lwnn / lwnn / tools / onnx2lwnn.py
def is_type_okay(oT):
    # Temporarily replace the graph's declared output with the candidate
    # type, save the model, and check whether onnxruntime can build
    # a session from it.
    oldoutputs = [n for n in self.onnx_model.graph.output]
    del self.onnx_model.graph.output[:]
    newoutputs = [onnx.helper.make_tensor_value_info(output, oT, None)]
    self.onnx_model.graph.output.extend(newoutputs)
    onnx.save(self.onnx_model, '.tmp.onnx')
    del self.onnx_model.graph.output[:]
    self.onnx_model.graph.output.extend(oldoutputs)
    try:
        sess = onnxruntime.InferenceSession('.tmp.onnx')
        return True
    except Exception:
        return False
        for oT in [onnx.TensorProto.FLOAT, onnx.TensorProto.INT64, onnx.TensorProto.INT32]: