This repository has been archived by the owner on Jan 13, 2024. It is now read-only.

Commit

Adds function insert_results_into_onnx to insert results into a graph to debug (#309)

* Adds function insert_results_into_onnx to insert results into a graph for debugging purposes
* Update test_onnxrt_python_runtime_control_loop.py
* Add function log1p, update for skl2onnx==1.9.3
* Update onnx_transformer.py
xadupre committed Sep 21, 2021
1 parent 69efdd2 commit f83f9a8
Showing 25 changed files with 440 additions and 45 deletions.
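
Before the file-by-file diff, here is a minimal sketch (not part of the commit itself) of how the new helper can be used. It mirrors the unit test added in _unittests/ut_tools/test_onnx_manipulations.py below; the tiny model, the result name 'Z0' and the opset are purely illustrative.

import numpy
from onnx import helper, TensorProto
from mlprodict.onnxrt import OnnxInference
from mlprodict.onnx_tools.onnx_manipulations import insert_results_into_onnx

# Build a tiny model computing Z = Identity(Shape(X)).
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, None)
Z = helper.make_tensor_value_info('Z', TensorProto.INT64, None)
node1 = helper.make_node('Shape', ['X'], ['Z0'], name='Zt')
node2 = helper.make_node('Identity', ['Z0'], ['Z'], name='Zti')
graph = helper.make_graph([node1, node2], 'test-model', [X], [Z])
model_def = helper.make_model(
    graph, producer_name='mlprodict', ir_version=7, producer_version='0.1',
    opset_imports=[helper.make_operatorsetid('', 13)])

# Attach an observed value for the intermediate result 'Z0'. The returned
# graph holds extra nodes in domain "DEBUG" which a viewer such as netron
# can display next to the original nodes.
new_graph = insert_results_into_onnx(
    model_def, {'Z0': numpy.array([[29, 39]], dtype=numpy.int64)})

# The modified graph still computes the same output as the original one.
oinf = OnnxInference(new_graph)
print(oinf.run({'X': numpy.array([[5.6, 7.8]], dtype=numpy.float32)}))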
8 changes: 5 additions & 3 deletions _doc/sphinxdoc/source/api/tools.rst
@@ -27,13 +27,15 @@ Export

.. autosignature:: mlprodict.onnx_tools.onnx_export.export2tf2onnx

Graphs
++++++
Graphs helper, manipulations
++++++++++++++++++++++++++++

Functions to help understand models or modify them.

.. autosignature:: mlprodict.tools.model_info.analyze_model

.. autosignature:: mlprodict.onnx_tools.onnx_manipulations.insert_results_into_onnx

.. autosignature:: mlprodict.onnx_tools.onnx_manipulations.enumerate_model_node_outputs

.. autosignature:: mlprodict.tools.code_helper.make_callable
@@ -48,7 +50,7 @@ Functions to help understand models or modify them.

.. autosignature:: mlprodict.testing.script_testing.verify_script

Onnx Optimisation
Onnx Optimization
+++++++++++++++++

The following functions reduce the number of ONNX operators in a graph
1 change: 1 addition & 0 deletions _doc/sphinxdoc/source/conf.py
@@ -90,6 +90,7 @@
'mlinsights': 'http://www.xavierdupre.fr/app/mlinsights/helpsphinx/index.html',
'mlprodict': 'http://www.xavierdupre.fr/app/mlprodict/helpsphinx/index.html',
'mlstatpy': 'http://www.xavierdupre.fr/app/mlstatpy/helpsphinx/index.html',
'netron': 'https://github.com/lutzroeder/netron',
'numba': 'https://numba.org/',
'numpy': ('https://www.numpy.org/',
('https://docs.scipy.org/doc/numpy/reference/generated/numpy.{0}.html', 1),
Binary file added _doc/sphinxdoc/source/phdoc_static/debug.png
4 changes: 2 additions & 2 deletions _unittests/ut_module/test_code_style.py
@@ -18,7 +18,7 @@ def test_style_src(self):
'R1702', 'W0212', 'W0640', 'W0223', 'W0201',
'W0622', 'C0123', 'W0107', 'R1728',
'C0415', 'R1721', 'C0411', 'R1735',
'C0208', 'C0325', 'W1514'),
'C0208', 'C0325', 'W1514', 'C0209'),
skip=["Instance of 'tuple' has no ",
"do not compare types, use 'isinstance()'",
"Instance of 'AutoAction' has no 'children' member",
@@ -34,7 +34,7 @@ def test_style_test(self):
test = os.path.normpath(os.path.join(thi, "..", ))
check_pep8(test, fLOG=fLOG, neg_pattern="temp_.*",
pylint_ignore=('C0103', 'C1801', 'R0201', 'R1705', 'W0108', 'W0613',
'C0111', 'W0107', 'C0415', 'R1728',
'C0111', 'W0107', 'C0415', 'R1728', 'C0209',
'R1721', 'C0302', 'C0411', 'R1735', 'W1514'),
skip=["Instance of 'tuple' has no ",
"R1720",
11 changes: 11 additions & 0 deletions _unittests/ut_npy/test_numpy_onnx_pyrt.py
@@ -214,6 +214,7 @@ def test_det_float32(self):
[[6.1, 5], [3.5, -7.8]]], dtype=numpy.float32)
self.common_test1(x, numpy.linalg.det, nxnpy.det, numpy.float32)

@ignore_warnings(UserWarning)
def test_dot_float32(self):
x = numpy.array([[6.1, 5], [3.5, 7.8]], dtype=numpy.float32)
self.common_testn((x, x), numpy.dot, nxnpy.dot,
@@ -283,6 +284,16 @@ def test_log_float64(self):
self.common_test1(x, numpy.log, nxnpy.log, numpy.float64,
ort=older_than)

def test_log1p_float32(self):
x = numpy.array([[6.1, 5], [3.5, 7.8]], dtype=numpy.float32)
self.common_test1(x, numpy.log1p, nxnpy.log1p, numpy.float32)

def test_log1p_float64(self):
older_than = compare_module_version(ort_version, "1.7.0") >= 0
x = numpy.array([[6.1, 5], [3.5, 7.8]], dtype=numpy.float64)
self.common_test1(x, numpy.log1p, nxnpy.log1p, numpy.float64,
ort=older_than)

def test_mean_float32(self):
kwargs = [{'axis': 0}, {}, {'axis': 1}]
for kw in kwargs:
12 changes: 6 additions & 6 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
@@ -1874,7 +1874,7 @@ def test_onnxt_runtime_dequantize_linear(self):
onx = OnnxDequantizeLinear(
'X', x_scale, x_zero_point, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
model_def = onx.to_onnx({'X': X},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
@@ -1889,7 +1889,7 @@ def test_onnxt_runtime_dequantize_linear(self):
onx = OnnxDequantizeLinear(
'X', x_scale, x_zero_point, output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': X.astype(numpy.float32)},
model_def = onx.to_onnx({'X': X},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': X})
@@ -2227,15 +2227,15 @@ def do_test_onnxt_runtime_gemm(self, runtime):
onx = OnnxGemm('X', idi, cst, transB=1, output_names=['Y'],
alpha=numpy.float32(1.),
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': idi.astype(numpy.float64)},
model_def = onx.to_onnx({'X': idi.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
if 'onnxruntime' in runtime:
model_def.ir_version = get_ir_version_from_onnx()
oinf = OnnxInference(model_def, runtime=runtime)
got = oinf.run({'X': X.astype(numpy.float32)})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(numpy.dot(X, idi.T) +
cst, got['Y'], decimal=5)
self.assertEqualArray(
numpy.dot(X, idi.T) + cst, got['Y'], decimal=5)

@wraplog()
def test_onnxt_runtime_global_average_pool(self):
@@ -2640,7 +2640,7 @@ def test_onnxt_runtime_qlinear_conv(self):
dtype=numpy.uint8).reshape((1, 1, 7, 7))

x_scale = numpy.float32(0.00369204697)
x_zero_point = numpy.uint8(132)
x_zero_point = numpy.array(132, dtype=numpy.uint8)

w = numpy.array([0], dtype=numpy.uint8).reshape((1, 1, 1, 1))

18 changes: 18 additions & 0 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_control_loop.py
@@ -59,6 +59,12 @@ def expect(node, inputs, outputs, name):
for e, g in zip(outputs, oseq):
self.assertEqualArray(e, g)

del model_def.opset_import[:] # pylint: disable=E1101
op_set = model_def.opset_import.add() # pylint: disable=E1101
op_set.domain = ''
op_set.version = 14
model_def.ir_version = 7

test_cases = {
'at_back': [numpy.array([10, 11, 12]).astype(numpy.int64)],
'at_front': [numpy.array([-2, -1, 0]),
@@ -182,6 +188,12 @@ def test_loop(self):
'res', TensorProto.FLOAT, None)], # pylint: disable=E1101
nodes=[node, node_concat]))

del model_def.opset_import[:] # pylint: disable=E1101
op_set = model_def.opset_import.add() # pylint: disable=E1101
op_set.domain = ''
op_set.version = 14
model_def.ir_version = 7

expected = numpy.array([
1., 1., 2., 1., 2., 3., 1., 2.,
3., 4., 1., 2., 3., 4., 5.], dtype=numpy.float32)
@@ -292,6 +304,12 @@ def test_loop_additional_input(self):
'Y', TensorProto.INT64, [])], # pylint: disable=E1101
nodes=[node1, node, node_concat]))

del model_def.opset_import[:] # pylint: disable=E1101
op_set = model_def.opset_import.add() # pylint: disable=E1101
op_set.domain = ''
op_set.version = 14
model_def.ir_version = 7

expected = numpy.array([
1., 1., 2., 1., 2., 3., 1., 2.,
3., 4., 1., 2., 3., 4., 5.], dtype=numpy.float32)
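
The same five lines are inserted at three places in this test file. As a hedged reading (the commit message does not spell out the rationale), they pin the generated model to a fixed opset and IR version so the Loop subgraphs are resolved consistently by both the python runtime and onnxruntime:

# Pattern repeated in the hunks above: keep only the default ONNX domain at
# opset 14 and fix the IR version the runtimes should assume.
del model_def.opset_import[:]
op_set = model_def.opset_import.add()
op_set.domain = ''
op_set.version = 14
model_def.ir_version = 7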
6 changes: 3 additions & 3 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_ml_text.py
@@ -464,9 +464,9 @@ def test_multi_output_classifier(self):
sess = OnnxInference(onx)

got = sess.run(inputs)
self.assertEqualArray(expected_label, got[0])
self.assertEqual(len(expected_proba), len(got[1]))
for e, g in zip(expected_proba, got[1]):
self.assertEqualArray(expected_label, got['label'])
self.assertEqual(len(expected_proba), len(got['probabilities']))
for e, g in zip(expected_proba, got['probabilities']):
self.assertEqualArray(e, g, decimal=5)


10 changes: 5 additions & 5 deletions _unittests/ut_onnxrt/test_onnxrt_validate_onnxruntime2.py
@@ -27,7 +27,7 @@
class TestOnnxrtValidateOnnxRuntime(ExtTestCase):

@skipif_appveyor('crashes')
@ignore_warnings(category=ignored_warnings)
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_KMeans(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
@@ -50,7 +50,7 @@ def myprint(*args, **kwargs):
# self.assertGreater(len(buffer), 1)

@skipif_appveyor('crashes')
@ignore_warnings(category=ignored_warnings)
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_BernoulliNB(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
@@ -71,7 +71,7 @@ def myprint(*args, **kwargs):
self.assertGreater(len(buffer), 1 if debug else 0)

@skipif_appveyor('crashes')
@ignore_warnings(category=ignored_warnings)
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_AdaBoostRegressor(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
@@ -93,7 +93,7 @@ def myprint(*args, **kwargs):
self.assertGreater(len(buffer), 1 if debug else 0)

@skipif_appveyor('crashes')
@ignore_warnings(category=ignored_warnings)
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_onnxruntime_LogisticRegression(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
@@ -114,7 +114,7 @@ def myprint(*args, **kwargs):
# self.assertGreater(len(buffer), 1)

@skipif_appveyor('crashes')
@ignore_warnings(category=ignored_warnings)
@ignore_warnings(category=ConvergenceWarning)
def test_validate_sklearn_operators_all_onnxruntime(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
79 changes: 78 additions & 1 deletion _unittests/ut_tools/test_onnx_manipulations.py
@@ -4,6 +4,7 @@
import unittest
from collections import OrderedDict
import numpy
from onnx import helper, TensorProto
from pyquickhelper.pycode import ExtTestCase
from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611
OnnxAdd, OnnxMul, OnnxSub, OnnxIdentity, OnnxScan,
@@ -14,7 +15,7 @@
from mlprodict.onnx_tools.optim import onnx_remove_node_unused
from mlprodict.onnx_tools.onnx_manipulations import (
select_model_inputs_outputs, enumerate_model_node_outputs,
onnx_rename_names)
onnx_rename_names, insert_results_into_onnx)
from mlprodict.tools import get_opset_number_from_onnx


@@ -334,6 +335,82 @@ def flog(*s):
y2 = oinf2.run({'x': x})
self.assertEqualArray(y1['Y'], y2['Y'])

def test_insert_results_into_onnx(self):
X = helper.make_tensor_value_info(
'X', TensorProto.FLOAT, None) # pylint: disable=E1101
Z = helper.make_tensor_value_info(
'Z', TensorProto.INT64, None) # pylint: disable=E1101
node_def = helper.make_node('Shape', ['X'], ['Z0'], name='Zt')
node_def1 = helper.make_node('Identity', ['Z0'], ['Z'], name='Zti')
graph_def = helper.make_graph(
[node_def, node_def1], 'test-model', [X], [Z])
model_def = helper.make_model(
graph_def, producer_name='mlprodict',
ir_version=7, producer_version='0.1',
opset_imports=[helper.make_operatorsetid('', 13)])

new_graph = insert_results_into_onnx(
model_def, {'Z0': numpy.array([[29, 39]], dtype=numpy.int64)})
s_graph = str(new_graph)
self.assertIn('domain: "DEBUG"', s_graph)
self.assertNotIn('pname', s_graph)
self.assertIn('op_type: "DEBUG"', s_graph)
self.assertRaise(lambda: insert_results_into_onnx(
model_def, {'Zt': numpy.array([29, 39], dtype=numpy.int64)}),
RuntimeError)
# with open('debug.onnx', 'wb') as f:
# f.write(new_graph.SerializeToString())

oinf1 = OnnxInference(model_def)
oinf2 = OnnxInference(new_graph)
cst = numpy.array([[5.6, 7.8]])
self.assertEqualArray(oinf1.run({'X': cst})['Z'],
oinf2.run({'X': cst})['Z'])

onx = oinf1.run2onnx({'X': cst})[1]
s_graph = str(onx)
self.assertIn('domain: "DEBUG"', s_graph)
self.assertIn('op_type: "DEBUG"', s_graph)
self.assertNotIn('pname', s_graph)
oinf3 = OnnxInference(onx)
self.assertEqualArray(oinf1.run({'X': cst})['Z'],
oinf3.run({'X': cst})['Z'])

def test_insert_results_into_onnx_init(self):
X = helper.make_tensor_value_info(
'X', TensorProto.FLOAT, None) # pylint: disable=E1101
Z = helper.make_tensor_value_info(
'Z', TensorProto.INT64, None) # pylint: disable=E1101
node_def = helper.make_node('Shape', ['X'], ['Z0'], name='Zt')
node_def1 = helper.make_node('Identity', ['Z0'], ['Z'], name='Zti')
graph_def = helper.make_graph(
[node_def, node_def1], 'test-model', [X], [Z])
model_def = helper.make_model(
graph_def, producer_name='mlprodict',
ir_version=7, producer_version='0.1',
opset_imports=[helper.make_operatorsetid('', 13)])

new_graph = insert_results_into_onnx(
model_def, {'Z0': numpy.array([[29, 39]], dtype=numpy.int64)},
as_parameter=False, param_name=lambda k: k)
s_graph = str(new_graph)
self.assertIn('domain: "DEBUG"', s_graph)
self.assertIn('op_type: "DEBUG"', s_graph)
self.assertRaise(lambda: insert_results_into_onnx(
model_def, {'Zt': numpy.array([29, 39], dtype=numpy.int64)}),
RuntimeError)
self.assertRaise(lambda: insert_results_into_onnx(
model_def, {'X': numpy.array([29, 39], dtype=numpy.int64)}),
NotImplementedError)
# with open('debug.onnx', 'wb') as f:
# f.write(new_graph.SerializeToString())

oinf1 = OnnxInference(model_def)
oinf2 = OnnxInference(new_graph)
cst = numpy.array([[5.6, 7.8]])
self.assertEqualArray(oinf1.run({'X': cst})['Z'],
oinf2.run({'X': cst})['Z'])


if __name__ == "__main__":
unittest.main()
7 changes: 7 additions & 0 deletions mlprodict/npy/numpy_onnx_impl.py
@@ -331,6 +331,13 @@ def log(x):
return OnnxVar(x, op=OnnxLog)


def log1p(x):
"See :epkg:`numpy:log1p`."
x1 = OnnxVar(x, numpy.array([1], dtype=x.dtype),
op=OnnxAdd)
return OnnxVar(x1, op=OnnxLog)


def mean(x, axis=None, keepdims=0):
"See :epkg:`numpy:mean`."
if axis is None:
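
ONNX has no Log1p operator, so the helper above composes it from Add and Log. A quick equivalence check in plain numpy (a sketch, not part of the commit):

import numpy

x = numpy.array([[6.1, 5.0], [3.5, 7.8]], dtype=numpy.float32)
# log1p(x) is built as Log(Add(x, 1)), i.e. numpy.log(x + 1) element-wise.
assert numpy.allclose(numpy.log1p(x), numpy.log(x + numpy.float32(1)))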
7 changes: 7 additions & 0 deletions mlprodict/npy/numpy_onnx_pyrt.py
@@ -42,6 +42,7 @@
hstack as nx_hstack,
isnan as nx_isnan,
log as nx_log,
log1p as nx_log1p,
matmul as nx_matmul,
mean as nx_mean,
pad as nx_pad,
@@ -248,6 +249,12 @@ def log(x):
return nx_log(x)


@onnxnumpy_np(signature=NDArraySameTypeSameShape("floats"))
def log1p(x):
"log1p"
return nx_log1p(x)


@onnxnumpy_np(signature=NDArrayType(("T:all", "T")))
def matmul(a, b):
"matmul"
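
A short usage sketch of the ONNX-backed version, mirroring the new tests in _unittests/ut_npy/test_numpy_onnx_pyrt.py (the alias nxnpy follows those tests; agreement with numpy is assumed only up to a small tolerance):

import numpy
import mlprodict.npy.numpy_onnx_pyrt as nxnpy

x = numpy.array([[6.1, 5.0], [3.5, 7.8]], dtype=numpy.float32)
# The decorated function builds and runs an ONNX graph under the hood and
# should agree with numpy.log1p for float inputs.
assert numpy.allclose(numpy.log1p(x), nxnpy.log1p(x), rtol=1e-5)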
18 changes: 17 additions & 1 deletion mlprodict/onnx_tools/onnx2py_helper.py
@@ -8,7 +8,7 @@
import numpy
from scipy.sparse import coo_matrix
from onnx import onnx_pb as onnx_proto, TensorProto
from onnx.numpy_helper import to_array, from_array
from onnx.numpy_helper import to_array, from_array as onnx_from_array
from skl2onnx.common.data_types import _guess_numpy_type


@@ -42,6 +42,22 @@ def to_bytes(val):
return pb.SerializeToString()


def from_array(value, name=None):
"""
Converts an array into an ONNX tensor.
:param value: numpy array
:param name: name of the tensor (optional)
:return: ONNX tensor
"""
if isinstance(value, numpy.ndarray):
pb = onnx_from_array(value, name=name)
return pb
if isinstance(value, TensorProto):
return value
raise NotImplementedError(
"Unable to convert type %r into an ONNX tensor." % type(value))


def from_bytes(b):
"""
Retrieves an array from bytes then protobuf.
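
A hedged sketch of the new wrapper's behaviour as written above: a numpy array goes through onnx.numpy_helper.from_array, an existing TensorProto is returned unchanged, and any other type raises NotImplementedError.

import numpy
from mlprodict.onnx_tools.onnx2py_helper import from_array

tensor = from_array(numpy.array([1.5, 2.5], dtype=numpy.float32), name='cst')
assert from_array(tensor) is tensor  # a TensorProto passes through as-is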
