2 changes: 1 addition & 1 deletion _unittests/ut_npy/test_custom_embedded_models.py
@@ -158,7 +158,7 @@ def test_function_classifier_embedded_float64(self):
         self.common_test_function_classifier_embedded(numpy.float64)
 
     def common_test_function_regressor_embedded(self, dtype):
-        X = numpy.random.randn(20, 2).astype(dtype)
+        X = numpy.random.randn(40, 2).astype(dtype)
         y = (X.sum(axis=1) + numpy.random.randn(
             X.shape[0])).astype(numpy.float32)
         dec = TwoLinearRegressionOnnx()
79 changes: 44 additions & 35 deletions _unittests/ut_onnx_conv/test_scorers.py
@@ -16,6 +16,8 @@
 from mlprodict.onnx_conv.scorers.register import CustomScorerTransform
 from mlprodict.onnxrt import OnnxInference
 from mlprodict.onnx_conv.scorers.cdist_score import score_cdist_sum
+from mlprodict.tools.asv_options_helper import (
+    get_opset_number_from_onnx)
 
 
 class TestScorers(ExtTestCase):
@@ -56,48 +58,55 @@ def test_score_cdist_sum_onnx(self):
 
         init_types = OrderedDict([('X', X), ('Y', Y)])
 
+        opsets = [11, get_opset_number_from_onnx()]
         options = {id(score_cdist_sum): {"cdist": "single-node"}}
         temp = get_temp_folder(__file__, 'temp_score_cdist_sum_onnx')
 
         for metric in ['sqeuclidean', 'euclidean', 'minkowski']:
-            if metric == 'minkowski':
-                scorer = make_scorer(
-                    score_cdist_sum, metric=metric, greater_is_better=False,
-                    p=3)
-            else:
-                scorer = make_scorer(
-                    score_cdist_sum, metric=metric, greater_is_better=False)
-            self.assertRaise(lambda: to_onnx(scorer, X),  # pylint: disable=W0640
-                             ValueError)
+            for opset in opsets:
+                with self.subTest(metric=metric, opset=opset):
+                    if metric == 'minkowski':
+                        scorer = make_scorer(
+                            score_cdist_sum, metric=metric,
+                            greater_is_better=False, p=3)
+                    else:
+                        scorer = make_scorer(
+                            score_cdist_sum, metric=metric,
+                            greater_is_better=False)
+                    self.assertRaise(
+                        lambda: to_onnx(
+                            scorer, X, target_opset=opset),  # pylint: disable=W0640
+                        ValueError)
 
-            monx1 = to_onnx(scorer, init_types)
-            monx2 = to_onnx(scorer, init_types, options=options)
+                    monx1 = to_onnx(scorer, init_types, target_opset=opset)
+                    monx2 = to_onnx(scorer, init_types, options=options,
+                                    target_opset=opset)
 
-            oinf1 = OnnxInference(monx1)
-            oinf2 = OnnxInference(monx2)
-            if metric == 'minkowski':
-                res0 = score_cdist_sum(X, Y, metric=metric, p=3)
-            else:
-                res0 = score_cdist_sum(X, Y, metric=metric)
-            res1 = oinf1.run({'X': X, 'Y': Y})['scores']
-            res2 = oinf2.run({'X': X, 'Y': Y})['scores']
-            self.assertEqualArray(res1, res0, decimal=5)
-            self.assertEqualArray(res2, res0, decimal=5)
+                    oinf1 = OnnxInference(monx1)
+                    oinf2 = OnnxInference(monx2)
+                    if metric == 'minkowski':
+                        res0 = score_cdist_sum(X, Y, metric=metric, p=3)
+                    else:
+                        res0 = score_cdist_sum(X, Y, metric=metric)
+                    res1 = oinf1.run({'X': X, 'Y': Y})['scores']
+                    res2 = oinf2.run({'X': X, 'Y': Y})['scores']
+                    self.assertEqualArray(res1, res0, decimal=5)
+                    self.assertEqualArray(res2, res0, decimal=5)
 
-            name1 = os.path.join(temp, "cdist_scan_%s.onnx" % metric)
-            with open(name1, 'wb') as f:
-                f.write(monx1.SerializeToString())
-            name2 = os.path.join(temp, "cdist_cdist_%s.onnx" % metric)
-            with open(name2, 'wb') as f:
-                f.write(monx2.SerializeToString())
-            data = os.path.join(temp, "data_%s.txt" % metric)
-            with open(data, "w") as f:
-                f.write("X\n")
-                f.write(str(X) + "\n")
-                f.write("Y\n")
-                f.write(str(Y) + "\n")
-                f.write("expected\n")
-                f.write(str(res0) + "\n")
+                    name1 = os.path.join(temp, "cdist_scan_%s.onnx" % metric)
+                    with open(name1, 'wb') as f:
+                        f.write(monx1.SerializeToString())
+                    name2 = os.path.join(temp, "cdist_cdist_%s.onnx" % metric)
+                    with open(name2, 'wb') as f:
+                        f.write(monx2.SerializeToString())
+                    data = os.path.join(temp, "data_%s.txt" % metric)
+                    with open(data, "w") as f:
+                        f.write("X\n")
+                        f.write(str(X) + "\n")
+                        f.write("Y\n")
+                        f.write(str(Y) + "\n")
+                        f.write("expected\n")
+                        f.write(str(res0) + "\n")
 
 
 if __name__ == "__main__":
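For context, a minimal sketch of the conversion this reworked test exercises: the cdist-based scorer is converted at an explicit target opset and evaluated with the Python runtime. The import path for to_onnx is an assumption (the test's own import of it sits outside the visible hunks); the other names match the imports shown above.

from collections import OrderedDict
import numpy
from sklearn.metrics import make_scorer
from mlprodict.onnx_conv import to_onnx  # assumed import path
from mlprodict.onnxrt import OnnxInference
from mlprodict.onnx_conv.scorers.cdist_score import score_cdist_sum
from mlprodict.tools.asv_options_helper import get_opset_number_from_onnx

X = numpy.random.randn(4, 2).astype(numpy.float32)
Y = numpy.random.randn(3, 2).astype(numpy.float32)
scorer = make_scorer(score_cdist_sum, metric='sqeuclidean',
                     greater_is_better=False)
# Pinning target_opset is exactly what the new opset loop verifies.
monx = to_onnx(scorer, OrderedDict([('X', X), ('Y', Y)]),
               target_opset=get_opset_number_from_onnx())
scores = OnnxInference(monx).run({'X': X, 'Y': Y})['scores']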
107 changes: 99 additions & 8 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
@@ -28,7 +28,7 @@
     OnnxArgMin_11, OnnxArgMin,
     OnnxBatchNormalization,
     OnnxAcos, OnnxAcosh, OnnxAsin, OnnxAsinh, OnnxAtan, OnnxAtanh,
-    OnnxCeil, OnnxClip,
+    OnnxCast, OnnxCeil, OnnxClip,
     OnnxCompress,
     OnnxConcat, OnnxConv, OnnxConvTranspose,
     OnnxConstant, OnnxConstant_9, OnnxConstant_11,
@@ -72,8 +72,6 @@
     from skl2onnx.algebra.onnx_ops import OnnxBatchNormalization_14
 except ImportError:
     OnnxBatchNormalization_14 = None
-from skl2onnx.common.data_types import (
-    FloatTensorType, Int64TensorType, DoubleTensorType, StringTensorType)
 from skl2onnx import __version__ as skl2onnx_version
 from mlprodict.onnxrt import OnnxInference
 from mlprodict.tools.asv_options_helper import (
@@ -91,6 +89,14 @@
 from mlprodict.onnxrt.ops_cpu.op_pad import _pad_impl
 from mlprodict.onnxrt.ops_cpu.op_max_pool import _pool_get_output_shape, _pool_impl
 from mlprodict.onnxrt.ops_cpu.op_dropout import _dropout
+from mlprodict.onnxrt.ops_cpu._op_helper import proto2dtype
+from mlprodict.tools.onnx2py_helper import (
+    guess_proto_dtype, _elem_type_as_str)
+from mlprodict.tools.data_types import (
+    FloatTensorType, Int64TensorType, DoubleTensorType, StringTensorType,
+    Int32TensorType, BooleanTensorType, UInt8TensorType,
+    Int16TensorType, Int8TensorType, UInt16TensorType,
+    UInt32TensorType, UInt64TensorType, Float16TensorType)
 
 
 try:
@@ -347,7 +353,6 @@ def test_onnxt_runtime_argmax(self):
             self.assertEqualArray(numpy.argmax(
                 X, axis=0), got['Y'], decimal=6)
 
-            python_tested.append(OnnxArgMax)
             if br:
                 continue

@@ -391,9 +396,11 @@ def test_onnxt_runtime_argmax(self):
         got = oinf.run({'X': X})
         self.assertEqual(list(sorted(got)), ['Y'])
         self.assertEqualArray(exp, got['Y'], decimal=6)
-        sparse_support.append(('UnOp', None, OnnxArgMax.__name__))
         X = numpy.array([[2, 1], [0, 1]], dtype=float)
 
+        sparse_support.append(('UnOp', None, OnnxArgMax.__name__))
+        python_tested.append(OnnxArgMax)
 
     @unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0")
     @wraplog()
     def test_onnxt_runtime_argmax_12(self):
@@ -437,7 +444,6 @@ def test_onnxt_runtime_argmin(self):
             if br:
                 continue
 
-            python_tested.append(OnnxArgMin)
             oinfpy = OnnxInference(
                 model_def, runtime="python", inplace=True)
             validate_python_inference(
@@ -477,7 +483,9 @@ def test_onnxt_runtime_argmin(self):
         got = oinf.run({'X': X})
         self.assertEqual(list(sorted(got)), ['Y'])
         self.assertEqualArray(exp, got['Y'], decimal=6)
-        sparse_support.append(('UnOp', None, OnnxArgMin.__name__))
 
+        sparse_support.append(('UnOp', None, OnnxArgMin.__name__))
+        python_tested.append(OnnxArgMin)
 
     @unittest.skipIf(onnx_opset_version() < 12, reason="needs onnx 1.7.0")
     @wraplog()
@@ -642,6 +650,89 @@ def test_onnxt_runtime_batch_normalization_training(self):
         self.assertNotEmpty(y)
         self.assertNotEmpty(var)
 
+    @wraplog()
+    def test_onnxt_runtime_cast_out(self):
+        x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(numpy.float32)  # pylint: disable=E1101
+        dest = [(TensorProto.FLOAT, numpy.float32, FloatTensorType),  # pylint: disable=E1101
+                (TensorProto.DOUBLE, numpy.float64, DoubleTensorType),  # pylint: disable=E1101
+                (TensorProto.INT32, numpy.int32, Int32TensorType),  # pylint: disable=E1101
+                (TensorProto.INT64, numpy.int64, Int64TensorType),  # pylint: disable=E1101
+                (TensorProto.INT8, numpy.int8, Int8TensorType),  # pylint: disable=E1101
+                (TensorProto.INT16, numpy.int16, Int16TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT8, numpy.uint8, UInt8TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT32, numpy.uint32, UInt32TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT16, numpy.uint16, UInt16TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT64, numpy.uint64, UInt64TensorType),  # pylint: disable=E1101
+                (TensorProto.FLOAT16, numpy.float16, Float16TensorType),  # pylint: disable=E1101
+                (TensorProto.BOOL, numpy.bool_, BooleanTensorType),  # pylint: disable=E1101
+                (TensorProto.STRING, numpy.str_, StringTensorType), ]  # pylint: disable=E1101
+
+        for opset in range(9, get_opset_number_from_onnx() + 1):
+            for to, nptp, outp in dest:
+                if nptp == numpy.bool_:
+                    self.assertIn(proto2dtype(to), (nptp, bool))
+                elif nptp == numpy.str_:
+                    self.assertIn(proto2dtype(to), (nptp, str))
+                else:
+                    self.assertEqual(proto2dtype(to), nptp)
+                self.assertEqual(to, guess_proto_dtype(nptp))
+                self.assertNotEmpty(_elem_type_as_str(to))
+                with self.subTest(opset=opset, to=to):
+                    onx = OnnxCast('X', to=to, output_names=['Y'],
+                                   op_version=opset)
+                    model_def = onx.to_onnx(
+                        {'X': x}, outputs=[('Y', outp())],
+                        target_opset=opset)
+                    got = OnnxInference(model_def).run({'X': x})
+                    if nptp == numpy.str_:
+                        self.assertEqual(
+                            x.astype(nptp).tolist(), got['Y'].tolist())
+                    else:
+                        self.assertEqualArray(x.astype(nptp), got['Y'])
+
+        python_tested.append(OnnxCast)
+
+    @wraplog()
+    def test_onnxt_runtime_cast_in(self):
+        x = numpy.array([1., 2., 3., 4., 5., 6.]).astype(numpy.float32)  # pylint: disable=E1101
+        dest = [(TensorProto.FLOAT, numpy.float32, FloatTensorType),  # pylint: disable=E1101
+                (TensorProto.DOUBLE, numpy.float64, DoubleTensorType),  # pylint: disable=E1101
+                (TensorProto.INT32, numpy.int32, Int32TensorType),  # pylint: disable=E1101
+                (TensorProto.INT64, numpy.int64, Int64TensorType),  # pylint: disable=E1101
+                (TensorProto.INT8, numpy.int8, Int8TensorType),  # pylint: disable=E1101
+                (TensorProto.INT16, numpy.int16, Int16TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT8, numpy.uint8, UInt8TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT32, numpy.uint32, UInt32TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT16, numpy.uint16, UInt16TensorType),  # pylint: disable=E1101
+                (TensorProto.UINT64, numpy.uint64, UInt64TensorType),  # pylint: disable=E1101
+                (TensorProto.FLOAT16, numpy.float16, Float16TensorType),  # pylint: disable=E1101
+                (TensorProto.BOOL, numpy.bool_, BooleanTensorType),  # pylint: disable=E1101
+                (TensorProto.STRING, numpy.str_, StringTensorType), ]  # pylint: disable=E1101
+
+        for opset in range(9, get_opset_number_from_onnx() + 1):
+            for to, nptp, _ in dest:
+                if nptp == numpy.bool_:
+                    self.assertIn(proto2dtype(to), (nptp, bool))
+                elif nptp == numpy.str_:
+                    self.assertIn(proto2dtype(to), (nptp, str))
+                else:
+                    self.assertEqual(proto2dtype(to), nptp)
+                self.assertEqual(to, guess_proto_dtype(nptp))
+                self.assertNotEmpty(_elem_type_as_str(to))
+                with self.subTest(opset=opset, to=to):
+                    xi = x.astype(nptp)
+                    onx = OnnxCast('X', to=TensorProto.STRING,  # pylint: disable=E1101
+                                   output_names=['Y'],
+                                   op_version=opset)
+                    model_def = onx.to_onnx(
+                        {'X': xi}, outputs=[('Y', StringTensorType())],
+                        target_opset=opset)
+                    got = OnnxInference(model_def).run({'X': xi})
+                    self.assertEqual(
+                        xi.astype(str).tolist(), got['Y'].tolist())
+
+        python_tested.append(OnnxCast)
+
     @wraplog()
     def test_onnxt_runtime_ceil(self):
         self.common_test_onnxt_runtime_unary(OnnxCeil, numpy.ceil)
@@ -3315,5 +3406,5 @@ def test_make_constant(self):
 
 
 if __name__ == "__main__":
-    # TestOnnxrtPythonRuntime().test_onnxt_runtime_pad()
+    # TestOnnxrtPythonRuntime().test_onnxt_runtime_cast_in()
     unittest.main()
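A condensed sketch of what test_onnxt_runtime_cast_out checks for a single type (opset 13 is an assumption here; the test itself sweeps every opset from 9 up):

import numpy
from onnx import TensorProto
from skl2onnx.algebra.onnx_ops import OnnxCast
from mlprodict.onnxrt import OnnxInference
from mlprodict.tools.data_types import UInt16TensorType

x = numpy.array([1., 2., 3.], dtype=numpy.float32)
onx = OnnxCast('X', to=TensorProto.UINT16, output_names=['Y'],
               op_version=13)
model_def = onx.to_onnx({'X': x}, outputs=[('Y', UInt16TensorType())],
                        target_opset=13)
got = OnnxInference(model_def).run({'X': x})
# The Python runtime casts with numpy, so got['Y'] equals
# x.astype(numpy.uint16).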
12 changes: 12 additions & 0 deletions mlprodict/onnxrt/ops_cpu/_op_helper.py
@@ -44,6 +44,18 @@ def proto2dtype(proto_type):
         return numpy.int64
     if proto_type == TensorProto.INT32:  # pylint: disable=E1101
         return numpy.int32
+    if proto_type == TensorProto.INT8:  # pylint: disable=E1101
+        return numpy.int8
+    if proto_type == TensorProto.INT16:  # pylint: disable=E1101
+        return numpy.int16
+    if proto_type == TensorProto.UINT64:  # pylint: disable=E1101
+        return numpy.uint64
+    if proto_type == TensorProto.UINT32:  # pylint: disable=E1101
+        return numpy.uint32
+    if proto_type == TensorProto.UINT8:  # pylint: disable=E1101
+        return numpy.uint8
+    if proto_type == TensorProto.UINT16:  # pylint: disable=E1101
+        return numpy.uint16
     if proto_type == TensorProto.FLOAT16:  # pylint: disable=E1101
         return numpy.float16
     raise ValueError(
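A quick sanity check of the new mappings, as a sketch assuming onnx and mlprodict are installed: proto2dtype maps an ONNX element type to the matching numpy dtype and raises ValueError for anything it does not know.

import numpy
from onnx import TensorProto
from mlprodict.onnxrt.ops_cpu._op_helper import proto2dtype

assert proto2dtype(TensorProto.INT16) == numpy.int16    # new in this change
assert proto2dtype(TensorProto.UINT64) == numpy.uint64  # new in this change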
8 changes: 8 additions & 0 deletions mlprodict/onnxrt/ops_cpu/op_cast.py
@@ -26,10 +26,18 @@ def __init__(self, onnx_node, desc=None, **options):
             self._dtype = numpy.uint8
         elif self.to == TensorProto.INT8:  # pylint: disable=E1101
             self._dtype = numpy.int8
+        elif self.to == TensorProto.INT16:  # pylint: disable=E1101
+            self._dtype = numpy.int16
         elif self.to == TensorProto.INT32:  # pylint: disable=E1101
             self._dtype = numpy.int32
         elif self.to == TensorProto.INT64:  # pylint: disable=E1101
             self._dtype = numpy.int64
+        elif self.to == TensorProto.UINT16:  # pylint: disable=E1101
+            self._dtype = numpy.uint16
+        elif self.to == TensorProto.UINT32:  # pylint: disable=E1101
+            self._dtype = numpy.uint32
+        elif self.to == TensorProto.UINT64:  # pylint: disable=E1101
+            self._dtype = numpy.uint64
         elif self.to == TensorProto.BOOL:  # pylint: disable=E1101
             self._dtype = numpy.bool
         elif self.to == TensorProto.STRING:  # pylint: disable=E1101
8 changes: 8 additions & 0 deletions mlprodict/onnxrt/shape_object.py
@@ -511,6 +511,14 @@ def __init__(self, shape, dtype=None, use_n1=False, name=None):
             self._dtype = numpy.int8
         elif self._dtype in (numpy.uint8, 'uint8', ):
             self._dtype = numpy.uint8
+        elif self._dtype in (numpy.int16, 'int16', ):
+            self._dtype = numpy.int16
+        elif self._dtype in (numpy.uint16, 'uint16', ):
+            self._dtype = numpy.uint16
+        elif self._dtype in (numpy.uint32, 'uint32', ):
+            self._dtype = numpy.uint32
+        elif self._dtype in (numpy.uint64, 'uint64', ):
+            self._dtype = numpy.uint64
         elif self._dtype not in {
                 numpy.float32, numpy.float64, numpy.int32, numpy.int64,
                 numpy.str, numpy.bool, numpy.float16, None,
14 changes: 14 additions & 0 deletions mlprodict/tools/data_types.py
@@ -0,0 +1,14 @@
+"""
+@file
+@brief Creates missing types in onnxconverter-common.
+
+.. versionadded:: 0.6
+"""
+from onnx import onnx_pb as onnx_proto  # pylint: disable=W0611
+from skl2onnx.common.data_types import (  # pylint: disable=W0611
+    TensorType, FloatTensorType, Int64TensorType, DoubleTensorType,
+    StringTensorType, Int32TensorType, BooleanTensorType,
+    UInt8TensorType)
+from skl2onnx.common.data_types import (  # pylint: disable=W0611
+    Int16TensorType, Int8TensorType, UInt16TensorType,
+    UInt32TensorType, UInt64TensorType, Float16TensorType)
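Per its docstring, the new module gives the rest of the code base a single import path for tensor types, including those missing from older converter releases. A minimal usage sketch:

from mlprodict.tools.data_types import Float16TensorType, UInt16TensorType

# Used in the tests above as:
#   onx.to_onnx({'X': x}, outputs=[('Y', UInt16TensorType())])
output_type = UInt16TensorType()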
26 changes: 25 additions & 1 deletion mlprodict/tools/onnx2py_helper.py
@@ -129,6 +129,14 @@ def _elem_type_as_str(elem_type):
         return 'int64'
     if elem_type == onnx_proto.TensorProto.INT32:  # pylint: disable=E1101
         return 'int32'
+    if elem_type == onnx_proto.TensorProto.UINT32:  # pylint: disable=E1101
+        return 'uint32'
+    if elem_type == onnx_proto.TensorProto.UINT64:  # pylint: disable=E1101
+        return 'uint64'
+    if elem_type == onnx_proto.TensorProto.INT16:  # pylint: disable=E1101
+        return 'int16'
+    if elem_type == onnx_proto.TensorProto.UINT16:  # pylint: disable=E1101
+        return 'uint16'
     if elem_type == onnx_proto.TensorProto.UINT8:  # pylint: disable=E1101
         return 'uint8'
     if elem_type == onnx_proto.TensorProto.INT8:  # pylint: disable=E1101
@@ -400,7 +408,23 @@ def guess_proto_dtype(dtype):
         return TensorProto.INT64  # pylint: disable=E1101
     if dtype == numpy.int32:
         return TensorProto.INT32  # pylint: disable=E1101
-    if dtype in (numpy.bool, bool):
+    if dtype == numpy.int16:
+        return TensorProto.INT16  # pylint: disable=E1101
+    if dtype == numpy.int8:
+        return TensorProto.INT8  # pylint: disable=E1101
+    if dtype == numpy.uint64:
+        return TensorProto.UINT64  # pylint: disable=E1101
+    if dtype == numpy.uint32:
+        return TensorProto.UINT32  # pylint: disable=E1101
+    if dtype == numpy.uint16:
+        return TensorProto.UINT16  # pylint: disable=E1101
+    if dtype == numpy.uint8:
+        return TensorProto.UINT8  # pylint: disable=E1101
+    if dtype == numpy.float16:
+        return TensorProto.FLOAT16  # pylint: disable=E1101
+    if dtype in (numpy.bool, bool, numpy.bool_):
         return TensorProto.BOOL  # pylint: disable=E1101
+    if dtype in (numpy.str, str, numpy.str_):
+        return TensorProto.STRING  # pylint: disable=E1101
     raise RuntimeError(
         "Unable to guess type for dtype={}.".format(dtype))  # pragma: no cover