From 64d31fda37b8bc341e0b22ca276d2402f4022b50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?xavier=20dupr=C3=A9?= Date: Tue, 21 Jul 2020 00:23:45 +0200 Subject: [PATCH 1/2] Fixes #142, add operator BatchNormalization --- .../ut_onnxrt/test_onnxrt_python_runtime_.py | 43 +++++++++++++++++++ mlprodict/__init__.py | 2 +- mlprodict/onnxrt/ops_cpu/_op_list.py | 1 + .../onnxrt/ops_cpu/op_batch_normalization.py | 36 ++++++++++++++++ 4 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 mlprodict/onnxrt/ops_cpu/op_batch_normalization.py diff --git a/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py b/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py index 6fec65530..e52e04090 100644 --- a/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py +++ b/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py @@ -22,6 +22,7 @@ from sklearn.utils.testing import ignore_warnings from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611 OnnxAbs, OnnxAdd, OnnxArgMax, OnnxArgMin, + OnnxBatchNormalization, OnnxConcat, OnnxCeil, OnnxClip, OnnxConstant, OnnxConstantOfShape, OnnxDequantizeLinear, @@ -54,6 +55,7 @@ from mlprodict.tools.asv_options_helper import ( get_opset_number_from_onnx, get_ir_version_from_onnx) from mlprodict.onnxrt.validate.validate_python import validate_python_inference +from mlprodict.onnxrt.ops_cpu.op_batch_normalization import _batchnorm_test_mode from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611 topk_element_min_double, topk_element_max_double, topk_element_fetch_double, topk_element_min_float, topk_element_max_float, topk_element_fetch_float, @@ -393,6 +395,47 @@ def test_onnxt_runtime_argmin_12(self): self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64), got['Y'], decimal=6) + def test_onnxt_batch_normalization(self): + # input size: (1, 2, 1, 3) + x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32) + s = numpy.array([1.0, 1.5]).astype(numpy.float32) + bias = numpy.array([0, 
1]).astype(numpy.float32) + mean = numpy.array([0, 3]).astype(numpy.float32) + var = numpy.array([1, 1.5]).astype(numpy.float32) + y = _batchnorm_test_mode(x, s, bias, mean, var).astype(numpy.float32) + + onx = OnnxBatchNormalization( + 'X', s, bias, mean, var, output_names=['Y'], + op_version=get_opset_number_from_onnx()) + model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, + target_opset=get_opset_number_from_onnx()) + oinf = OnnxInference(model_def) + got = oinf.run({'X': x}) + self.assertEqual(list(sorted(got)), ['Y']) + self.assertEqualArray(y, got['Y']) + + # input size: (2, 3, 4, 5) + x = numpy.random.randn(2, 3, 4, 5).astype(numpy.float32) + s = numpy.random.randn(3).astype(numpy.float32) + bias = numpy.random.randn(3).astype(numpy.float32) + mean = numpy.random.randn(3).astype(numpy.float32) + var = numpy.random.rand(3).astype(numpy.float32) + epsilon = 1e-2 + y = _batchnorm_test_mode( + x, s, bias, mean, var, epsilon).astype(numpy.float32) + + onx = OnnxBatchNormalization( + 'X', s, bias, mean, var, + output_names=['Y'], epsilon=epsilon, + op_version=get_opset_number_from_onnx()) + model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, + target_opset=get_opset_number_from_onnx()) + oinf = OnnxInference(model_def) + got = oinf.run({'X': x}) + self.assertEqual(list(sorted(got)), ['Y']) + self.assertEqualArray(y, got['Y']) + python_tested.append(OnnxBatchNormalization) + def test_onnxt_runtime_ceil(self): self.common_test_onnxt_runtime_unary(OnnxCeil, numpy.ceil) diff --git a/mlprodict/__init__.py b/mlprodict/__init__.py index 2995fb794..50e5777b6 100644 --- a/mlprodict/__init__.py +++ b/mlprodict/__init__.py @@ -4,7 +4,7 @@ @brief Ways to speed up predictions for a machine learned model. 
""" -__version__ = "0.4.1215" +__version__ = "0.4.1207" __author__ = "Xavier Dupré" diff --git a/mlprodict/onnxrt/ops_cpu/_op_list.py b/mlprodict/onnxrt/ops_cpu/_op_list.py index 283352162..573d19227 100644 --- a/mlprodict/onnxrt/ops_cpu/_op_list.py +++ b/mlprodict/onnxrt/ops_cpu/_op_list.py @@ -11,6 +11,7 @@ from .op_argmax import ArgMax from .op_argmin import ArgMin from .op_array_feature_extractor import ArrayFeatureExtractor +from .op_batch_normalization import BatchNormalization from .op_binarizer import Binarizer from .op_cast import Cast from .op_cdist import CDist diff --git a/mlprodict/onnxrt/ops_cpu/op_batch_normalization.py b/mlprodict/onnxrt/ops_cpu/op_batch_normalization.py new file mode 100644 index 000000000..a7f6dd083 --- /dev/null +++ b/mlprodict/onnxrt/ops_cpu/op_batch_normalization.py @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# pylint: disable=E0203,E1101,C0111 +""" +@file +@brief Runtime operator. +""" +import numpy +from ._op import OpRun + + +def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): + dims_x = len(x.shape) + dim_ones = (1,) * (dims_x - 2) + s = s.reshape(-1, *dim_ones) + bias = bias.reshape(-1, *dim_ones) + mean = mean.reshape(-1, *dim_ones) + var = var.reshape(-1, *dim_ones) + return s * (x - mean) / numpy.sqrt(var + epsilon) + bias + + +class BatchNormalization(OpRun): + + atts = {'epsilon': 1e-5, 'momentum': 0.9} + + def __init__(self, onnx_node, desc=None, **options): + OpRun.__init__(self, onnx_node, desc=desc, + expected_attributes=BatchNormalization.atts, + **options) + + def _run(self, x, scale, bias, mean, var): # pylint: disable=W0221 + res = _batchnorm_test_mode( + x, scale, bias, mean, var, epsilon=self.epsilon) + return (res, ) + + def _infer_shapes(self, x, scale, bias, mean, var): # pylint: disable=W0221 + return (x, ) From 74ad29fd73cec323253616556cc40007f8283b4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?xavier=20dupr=C3=A9?= Date: Tue, 21 Jul 2020 10:40:46 +0200 Subject: [PATCH 2/2] Fixes #143, add 
operator GlobalAveragePool --- .../ut_onnxrt/test_onnxrt_python_runtime_.py | 37 ++++++++++++++++++- mlprodict/__init__.py | 2 +- mlprodict/onnxrt/ops_cpu/_op_list.py | 1 + .../onnxrt/ops_cpu/op_global_average_pool.py | 33 +++++++++++++++++ 4 files changed, 70 insertions(+), 3 deletions(-) create mode 100644 mlprodict/onnxrt/ops_cpu/op_global_average_pool.py diff --git a/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py b/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py index e52e04090..bbdd667c7 100644 --- a/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py +++ b/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py @@ -29,7 +29,7 @@ OnnxDiv, OnnxEinsum, OnnxEqual, OnnxErf, OnnxExp, OnnxEyeLike, OnnxFlatten, OnnxFloor, - OnnxGreater, OnnxGemm, + OnnxGreater, OnnxGemm, OnnxGlobalAveragePool, OnnxIdentity, OnnxIsNaN, OnnxLog, OnnxLpNormalization, OnnxMatMul, OnnxMax, OnnxMean, OnnxMin, OnnxMul, @@ -56,6 +56,7 @@ get_opset_number_from_onnx, get_ir_version_from_onnx) from mlprodict.onnxrt.validate.validate_python import validate_python_inference from mlprodict.onnxrt.ops_cpu.op_batch_normalization import _batchnorm_test_mode +from mlprodict.onnxrt.ops_cpu.op_global_average_pool import _global_average_pool from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611 topk_element_min_double, topk_element_max_double, topk_element_fetch_double, topk_element_min_float, topk_element_max_float, topk_element_fetch_float, @@ -395,7 +396,7 @@ def test_onnxt_runtime_argmin_12(self): self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64), got['Y'], decimal=6) - def test_onnxt_batch_normalization(self): + def test_onnxt_runtime_batch_normalization(self): # input size: (1, 2, 1, 3) x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32) s = numpy.array([1.0, 1.5]).astype(numpy.float32) @@ -899,6 +900,38 @@ def do_test_onnxt_runtime_gemm(self, runtime): self.assertEqualArray(numpy.dot(X, idi.T) + cst, got['Y'], decimal=6) + def 
test_onnxt_runtime_global_average_pool(self): + x = numpy.random.randn(1, 3, 5, 5).astype(numpy.float32) + y = _global_average_pool(x).astype(numpy.float32) + + onx = OnnxGlobalAveragePool( + 'X', output_names=['Y'], + op_version=get_opset_number_from_onnx()) + model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, + target_opset=get_opset_number_from_onnx()) + oinf = OnnxInference(model_def) + got = oinf.run({'X': x}) + self.assertEqual(list(sorted(got)), ['Y']) + self.assertEqualArray(y, got['Y']) + + x = numpy.array([[[ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ]]]).astype(numpy.float32) + y = numpy.array([[[[5]]]]).astype(numpy.float32) + onx = OnnxGlobalAveragePool( + 'X', output_names=['Y'], + op_version=get_opset_number_from_onnx()) + model_def = onx.to_onnx({'X': x.astype(numpy.float32)}, + target_opset=get_opset_number_from_onnx()) + oinf = OnnxInference(model_def) + got = oinf.run({'X': x}) + self.assertEqual(list(sorted(got)), ['Y']) + self.assertEqualArray(y, got['Y']) + + python_tested.append(OnnxGlobalAveragePool) + def test_onnxt_runtime_greater(self): self.common_test_onnxt_runtime_binary(OnnxGreater, numpy.greater) diff --git a/mlprodict/__init__.py b/mlprodict/__init__.py index 50e5777b6..dcded8681 100644 --- a/mlprodict/__init__.py +++ b/mlprodict/__init__.py @@ -4,7 +4,7 @@ @brief Ways to speed up predictions for a machine learned model. 
""" -__version__ = "0.4.1207" +__version__ = "0.4.1208" __author__ = "Xavier Dupré" diff --git a/mlprodict/onnxrt/ops_cpu/_op_list.py b/mlprodict/onnxrt/ops_cpu/_op_list.py index 573d19227..628ebb734 100644 --- a/mlprodict/onnxrt/ops_cpu/_op_list.py +++ b/mlprodict/onnxrt/ops_cpu/_op_list.py @@ -36,6 +36,7 @@ from .op_gather import Gather from .op_gather_elements import GatherElements from .op_gemm import Gemm +from .op_global_average_pool import GlobalAveragePool from .op_greater import Greater from .op_floor import Floor from .op_identity import Identity diff --git a/mlprodict/onnxrt/ops_cpu/op_global_average_pool.py b/mlprodict/onnxrt/ops_cpu/op_global_average_pool.py new file mode 100644 index 000000000..dfa6cce69 --- /dev/null +++ b/mlprodict/onnxrt/ops_cpu/op_global_average_pool.py @@ -0,0 +1,33 @@ +# -*- encoding: utf-8 -*- +# pylint: disable=E0203,E1101,C0111 +""" +@file +@brief Runtime operator. +""" +import numpy +from ..shape_object import ShapeObject +from ._op import OpRun + + +def _global_average_pool(x): + spatial_shape = numpy.ndim(x) - 2 + y = numpy.average( + x, axis=tuple(range(spatial_shape, spatial_shape + 2))) + for _ in range(spatial_shape): + y = numpy.expand_dims(y, -1) + return y + + +class GlobalAveragePool(OpRun): + + def __init__(self, onnx_node, desc=None, **options): + OpRun.__init__(self, onnx_node, desc=desc, + **options) + + def _run(self, x): # pylint: disable=W0221 + res = _global_average_pool(x) + return (res, ) + + def _infer_shapes(self, x): # pylint: disable=W0221 + shape = x.shape[:2] + (1, ) * (len(x.shape) - 2) + return (ShapeObject(shape, dtype=x.dtype), )