2 changes: 1 addition & 1 deletion _doc/sphinxdoc/source/conf.py
@@ -16,7 +16,7 @@
try:
    import generate_visual_graphs
    import generate_automated_pages
-except ImportError:
+except ImportError:  # pragma: no cover
    this = os.path.dirname(__file__)
    sys.path.append(os.path.join(this, '_exts'))
    import generate_visual_graphs
2 changes: 1 addition & 1 deletion _doc/sphinxdoc/source/phdoc_static/my-styles.css
@@ -13,7 +13,7 @@ div.body ul {
}

div.body li {
-    line-height: 1em;
+    line-height: 1.1em;
}

.wy-nav-top {
49 changes: 49 additions & 0 deletions _unittests/ut_module/test_dl_mobilenet.py
@@ -0,0 +1,49 @@
"""
@brief test log(time=7s)
"""
import unittest
import numpy
from pyquickhelper.pycode import ExtTestCase
from pyensae.datasource import download_data
from mlprodict.onnxrt import OnnxInference


class TestLONGMobileNet(ExtTestCase):

def test_mobilenet(self):
src = ("https://s3.amazonaws.com/onnx-model-zoo/mobilenet/"
"mobilenetv2-1.0/")
model_file = "mobilenetv2-1.0.onnx"
download_data(model_file, website=src)
X = numpy.random.rand(1, 3, 224, 224).astype(dtype=numpy.float32)
res = []
for i, rt in enumerate(['python', 'python_compiled_debug',
'python_compiled', 'onnxruntime1']):
oinf = OnnxInference(model_file, runtime=rt)
self.assertNotEmpty(oinf)
self.assertEqual(oinf.input_names[:1], ['data'])
if hasattr(oinf, 'inits_'):
self.assertIn(
"mobilenetv20_features_conv0_weight", oinf.inits_)
self.assertEqualArray(
(0, -1), oinf.inits_["reshape_attr_tensor421"]['value'])
name = oinf.input_names[0]
out = oinf.output_names[0]
Y = oinf.run({name: X})
if any(map(numpy.isnan, Y[out].ravel())):
raise AssertionError(
"Runtime {}:{} produces NaN.\n{}".format(i, rt, Y[out]))
res.append((rt, Y[out]))
        # compare every other runtime against the first one (reference output)
        for rt, r in res[1:]:
            exp = numpy.squeeze(res[0][1])
            got = numpy.squeeze(r)
            try:
                self.assertEqual(exp.shape, got.shape)
                self.assertEqualArray(got, exp)
            except AssertionError as e:
                raise AssertionError(
                    "Issue with runtime: '{}'.".format(rt)) from e


if __name__ == "__main__":
    unittest.main()
36 changes: 35 additions & 1 deletion _unittests/ut_onnxrt/test_cpu_ops.py
@@ -1,13 +1,17 @@
"""
-@brief test log(time=2s)
+@brief test log(time=3s)
"""
import unittest
from logging import getLogger
import numpy
import onnx
from pyquickhelper.pycode import ExtTestCase
from skl2onnx.algebra.onnx_ops import (  # pylint: disable=E0611
    OnnxConv)
from mlprodict.onnxrt.ops_cpu.op_conv import Conv
from mlprodict.onnxrt.onnx2py_helper import _var_as_dict
from mlprodict.tools.asv_options_helper import get_opset_number_from_onnx
from mlprodict.onnxrt import OnnxInference


class TestCpuOps(ExtTestCase):
@@ -46,6 +50,36 @@ def test_cpu_conv(self):
                             [72., 111., 117., 123., 84.]]]]).astype(numpy.float32)
        self.assertEqualArray(exp, got)

    def test_cpu_conv_init(self):
        x = numpy.random.rand(1, 96, 56, 56).astype(numpy.float32)
        W = numpy.random.rand(24, 96, 1, 1).astype(numpy.float32)

        onx = OnnxConv(
            'X', W, output_names=['Y'],
            auto_pad='NOTSET', group=1, dilations=[1, 1],
            kernel_shape=[1, 1], pads=[0, 0, 0, 0], strides=[1, 1],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32),
                                 'W': W.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        oinfrt = OnnxInference(model_def, runtime='onnxruntime1')
        for _ in range(0, 3):
            x = numpy.random.rand(1, 96, 56, 56).astype(numpy.float32)
            W = numpy.random.rand(24, 96, 1, 1).astype(numpy.float32)
            got = oinf.run({'X': x, 'W': W})
            gotrt = oinfrt.run({'X': x, 'W': W})
            diff = list(numpy.abs((gotrt['Y'] - got['Y']).ravel()))
            sdiff = list(sorted(diff))
            if sdiff[-1] > 1e-5:
                raise AssertionError("runtimes disagree {}".format(sdiff[-5:]))
            for ii in range(len(diff)):  # pylint: disable=C0200
                if numpy.isnan(diff[ii]):
                    raise AssertionError(
                        "runtimes disagree about nan {}: {} # {} ? {}".format(
                            ii, diff[ii], gotrt['Y'].ravel()[ii], got['Y'].ravel()[ii]))
            self.assertEqualArray(gotrt['Y'], got['Y'], decimal=5)


if __name__ == "__main__":
    unittest.main()
21 changes: 21 additions & 0 deletions _unittests/ut_onnxrt/test_onnxrt_compiled.py
@@ -41,6 +41,27 @@ def test_onnxt_idi(self):
        self.assertIn('(Y, ) = n0_add(X, Ad_Addcst)', code)
        self.assertIn(' def compiled_run(dict_inputs):', str(oinf))

    def test_onnxt_idi_debug(self):
        idi = numpy.identity(2)
        onx = OnnxAdd('X', idi, output_names=['Y'],
                      op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': idi.astype(numpy.float32)})

        oinf = OnnxInference(model_def, runtime="python_compiled_debug")
        res, out, err = self.capture(
            lambda: oinf.run({'X': idi.astype(numpy.float32)}))
        self.assertEmpty(err)
        self.assertIn("-='i.X'", out)
        self.assertIn("-='o.Y'", out)
        self.assertEqual(idi * 2, res['Y'])
        self.assertIn('_run_compiled', oinf.__dict__)
        self.assertIn('_run_compiled_code', oinf.__dict__)
        code = oinf._run_compiled_code  # pylint: disable=W0212,E1101
        self.assertIsInstance(code, str)
        self.assertIn('def compiled_run(dict_inputs):', code)
        self.assertIn('(Y, ) = n0_add(X, Ad_Addcst)', code)
        self.assertIn(' def compiled_run(dict_inputs):', str(oinf))

    @skipif_circleci('fails to finish')
    def test_onnxt_iris_adaboost_regressor_dt(self):
        iris = load_iris()
35 changes: 33 additions & 2 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
@@ -23,7 +23,7 @@
from skl2onnx.algebra.onnx_ops import (  # pylint: disable=E0611
    OnnxAbs, OnnxAdd, OnnxArgMax, OnnxArgMin, OnnxAtan,
    OnnxBatchNormalization,
-    OnnxConcat,
+    OnnxConcat, OnnxConv,
    OnnxCeil, OnnxClip, OnnxConstant, OnnxConstantOfShape,
    OnnxDequantizeLinear,
    OnnxDiv,
@@ -400,7 +400,9 @@ def test_onnxt_runtime_atan(self):
        self.common_test_onnxt_runtime_unary(OnnxAtan, numpy.arctan)

    def test_onnxt_runtime_atan2(self):
-        test_pairs = [[y, x] for x in [3., -4., 0.] for y in [5., -6., 0.]]
+        test_pairs = [[y, x]
+                      for x in [3., -4., 0., -1., 1.]
+                      for y in [5., -6., 0., -1., 1.]]
        y_val = numpy.array([y for y, x in test_pairs], dtype=numpy.float32)
        x_val = numpy.array([x for y, x in test_pairs], dtype=numpy.float32)

@@ -568,6 +570,35 @@ def test_onnxt_runtime_constant_of_shape(self):
        oinfpy = OnnxInference(model_def, runtime="python", inplace=True)
        validate_python_inference(oinfpy, {'X': x})

    def test_onnxt_runtime_conv(self):
        x = numpy.array([[[[0., 1., 2., 3., 4.],  # (1, 1, 5, 5) input tensor
                           [5., 6., 7., 8., 9.],
                           [10., 11., 12., 13., 14.],
                           [15., 16., 17., 18., 19.],
                           [20., 21., 22., 23., 24.]]]]).astype(numpy.float32)
        W = numpy.array([[[[1., 1., 1.],  # (1, 1, 3, 3) tensor for convolution weights
                           [1., 1., 1.],
                           [1., 1., 1.]]]]).astype(numpy.float32)

        y_with_padding = numpy.array([[[[12., 21., 27., 33., 24.],  # (1, 1, 5, 5) output tensor
                                        [33., 54., 63., 72., 51.],
                                        [63., 99., 108., 117., 81.],
                                        [93., 144., 153., 162., 111.],
                                        [72., 111., 117., 123., 84.]]]]).astype(numpy.float32)

        onx = OnnxConv(
            'X', W, output_names=['Y'],
            kernel_shape=[3, 3], pads=[1, 1, 1, 1],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        got = oinf.run({'X': x})
        self.assertEqual(list(sorted(got)), ['Y'])
        self.assertEqualArray(y_with_padding, got['Y'])

        python_tested.append(OnnxConv)

    def test_onnxt_runtime_cum_sum(self):
        from skl2onnx.algebra.onnx_ops import OnnxCumSum  # pylint: disable=E0611

1 change: 1 addition & 0 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_custom.py
@@ -90,6 +90,7 @@ def test_onnxt_runtime_solve(self):
        validate_python_inference(
            oinfpy, {'A': A.astype(numpy.float32),
                     'Y': Y.astype(numpy.float32)})
        python_tested.append(OnnxSolve)


if __name__ == "__main__":
35 changes: 35 additions & 0 deletions _unittests/ut_tools/test_code_helper.py
@@ -0,0 +1,35 @@
"""
@brief test log(time=8s)
"""
import os
import unittest
import pickle
import numpy
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
from mlprodict.tools.code_helper import debug_print, debug_dump


class TestCodeHelper(ExtTestCase):

def test_debug_print(self):
_, out, err = self.capture(
lambda: debug_print('r', numpy.array([0, 1], dtype=numpy.float32), {}))
self.assertIn("'r'", out)
self.assertEmpty(err)

def test_debug_dump(self):
temp = get_temp_folder(__file__, "temp_debug_dump")
obj = {'in': [numpy.array([0, 1]), numpy.array([1, 2])],
'out': [numpy.array([0, numpy.nan])]}
_, out, __ = self.capture(
lambda: debug_dump("rrr", obj, temp))
self.assertIn("NAN-notin-out", out)
files = os.listdir(temp)
self.assertEqual(len(files), 1)
with open(os.path.join(temp, files[0]), 'rb') as f:
obj2 = pickle.load(f)
self.assertEqual(list(obj.keys()), list(obj2.keys()))


if __name__ == "__main__":
unittest.main()
2 changes: 1 addition & 1 deletion mlprodict/__init__.py
@@ -4,7 +4,7 @@
@brief Ways to speed up predictions for a machine learned model.
"""

__version__ = "0.4.1208"
__version__ = "0.4.1209"
__author__ = "Xavier Dupré"


4 changes: 2 additions & 2 deletions mlprodict/onnxrt/onnx2py_helper.py
@@ -321,7 +321,7 @@ def numpy_min(x):
    try:
        if hasattr(x, 'todense'):
            x = x.todense()
-        if x.dtype.kind.lower() not in 'uc':
+        if x.dtype.kind.lower() not in 'c':
            return x.min()
        try:  # pragma: no cover
            x = x.ravel()
@@ -347,7 +347,7 @@ def numpy_max(x):
    try:
        if hasattr(x, 'todense'):
            x = x.todense()
-        if x.dtype.kind.lower() not in 'uc':
+        if x.dtype.kind.lower() not in 'c':
            return x.max()
        try:  # pragma: no cover
            x = x.ravel()