-
Notifications
You must be signed in to change notification settings - Fork 2.7k
/
plot_backend.py
57 lines (47 loc) · 1.62 KB
/
plot_backend.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
.. _l-example-backend-api:
ONNX Runtime Backend for ONNX
=============================
*ONNX Runtime* extends the
`onnx backend API <https://github.com/onnx/onnx/blob/main/docs/ImplementingAnOnnxBackend.md>`_
to run predictions using this runtime.
Let's use the API to compute the prediction
of a simple logistic regression model.
"""
import numpy as np
from onnxruntime import datasets
from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument
import onnxruntime.backend as backend
from onnx import load
# Locate the sample logistic regression model bundled with onnxruntime,
# then parse it into an in-memory ONNX ModelProto.
name = datasets.get_example("logreg_iris.onnx")
model = load(name)

# Prepare an inference-ready representation of the model on the CPU device.
rep = backend.prepare(model, 'CPU')
features = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
    predicted_label, predicted_proba = rep.run(features)
    print("label={}".format(predicted_label))
    print("probabilities={}".format(predicted_proba))
except (RuntimeError, InvalidArgument) as err:
    # The hard-coded input may not match the model's expected shape;
    # the example prints the error instead of failing.
    print(err)
########################################
# Which device is reported depends on how the
# onnxruntime package was compiled: GPU or CPU.
from onnxruntime import get_device
device = get_device()
print(device)
########################################
# The backend can also directly load the model
# without using *onnx*.
# Here ``prepare`` receives the file path instead of a ModelProto.
rep = backend.prepare(name, 'CPU')
inputs = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
    outputs = rep.run(inputs)
except (RuntimeError, InvalidArgument) as exc:
    # Same best-effort reporting as above: show the failure, keep going.
    print(exc)
else:
    label, proba = outputs
    print("label={}".format(label))
    print("probabilities={}".format(proba))
#######################################
# The backend API is implemented by other frameworks
# and makes it easier to switch between multiple runtimes
# with the same API.