diff --git a/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py b/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
index e52e04090..bbdd667c7 100644
--- a/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
+++ b/_unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
@@ -29,7 +29,7 @@ OnnxDiv,
     OnnxEinsum, OnnxEqual, OnnxErf, OnnxExp, OnnxEyeLike,
     OnnxFlatten, OnnxFloor,
-    OnnxGreater, OnnxGemm,
+    OnnxGreater, OnnxGemm, OnnxGlobalAveragePool,
     OnnxIdentity, OnnxIsNaN, OnnxLog, OnnxLpNormalization,
     OnnxMatMul, OnnxMax, OnnxMean, OnnxMin, OnnxMul,
@@ -56,6 +56,7 @@
     get_opset_number_from_onnx, get_ir_version_from_onnx)
 from mlprodict.onnxrt.validate.validate_python import validate_python_inference
 from mlprodict.onnxrt.ops_cpu.op_batch_normalization import _batchnorm_test_mode
+from mlprodict.onnxrt.ops_cpu.op_global_average_pool import _global_average_pool
 from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import (  # pylint: disable=E0611
     topk_element_min_double, topk_element_max_double, topk_element_fetch_double,
     topk_element_min_float, topk_element_max_float, topk_element_fetch_float,
@@ -395,7 +396,7 @@ def test_onnxt_runtime_argmin_12(self):
         self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64),
                               got['Y'], decimal=6)
 
-    def test_onnxt_batch_normalization(self):
+    def test_onnxt_runtime_batch_normalization(self):
         # input size: (1, 2, 1, 3)
         x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
         s = numpy.array([1.0, 1.5]).astype(numpy.float32)
@@ -899,6 +900,38 @@ def do_test_onnxt_runtime_gemm(self, runtime):
             self.assertEqualArray(numpy.dot(X, idi.T) + cst,
                                   got['Y'], decimal=6)
 
+    def test_onnxt_runtime_global_average_pool(self):
+        x = numpy.random.randn(1, 3, 5, 5).astype(numpy.float32)
+        y = _global_average_pool(x).astype(numpy.float32)
+
+        onx = OnnxGlobalAveragePool(
+            'X', output_names=['Y'],
+            op_version=get_opset_number_from_onnx())
+        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
+                                target_opset=get_opset_number_from_onnx())
+        oinf = OnnxInference(model_def)
+        got = oinf.run({'X': x})
+        self.assertEqual(list(sorted(got)), ['Y'])
+        self.assertEqualArray(y, got['Y'])
+
+        x = numpy.array([[[
+            [1, 2, 3],
+            [4, 5, 6],
+            [7, 8, 9],
+        ]]]).astype(numpy.float32)
+        y = numpy.array([[[[5]]]]).astype(numpy.float32)
+        onx = OnnxGlobalAveragePool(
+            'X', output_names=['Y'],
+            op_version=get_opset_number_from_onnx())
+        model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
+                                target_opset=get_opset_number_from_onnx())
+        oinf = OnnxInference(model_def)
+        got = oinf.run({'X': x})
+        self.assertEqual(list(sorted(got)), ['Y'])
+        self.assertEqualArray(y, got['Y'])
+
+        python_tested.append(OnnxGlobalAveragePool)
+
     def test_onnxt_runtime_greater(self):
         self.common_test_onnxt_runtime_binary(OnnxGreater, numpy.greater)
 
diff --git a/mlprodict/__init__.py b/mlprodict/__init__.py
index 50e5777b6..dcded8681 100644
--- a/mlprodict/__init__.py
+++ b/mlprodict/__init__.py
@@ -4,7 +4,7 @@
 @brief Ways to speed up predictions for a machine learned model.
 """
 
-__version__ = "0.4.1207"
+__version__ = "0.4.1208"
 
 __author__ = "Xavier Dupré"
diff --git a/mlprodict/onnxrt/ops_cpu/_op_list.py b/mlprodict/onnxrt/ops_cpu/_op_list.py
index 573d19227..628ebb734 100644
--- a/mlprodict/onnxrt/ops_cpu/_op_list.py
+++ b/mlprodict/onnxrt/ops_cpu/_op_list.py
@@ -36,6 +36,7 @@
 from .op_gather import Gather
 from .op_gather_elements import GatherElements
 from .op_gemm import Gemm
+from .op_global_average_pool import GlobalAveragePool
 from .op_greater import Greater
 from .op_floor import Floor
 from .op_identity import Identity
diff --git a/mlprodict/onnxrt/ops_cpu/op_global_average_pool.py b/mlprodict/onnxrt/ops_cpu/op_global_average_pool.py
new file mode 100644
index 000000000..dfa6cce69
--- /dev/null
+++ b/mlprodict/onnxrt/ops_cpu/op_global_average_pool.py
@@ -0,0 +1,33 @@
+# -*- encoding: utf-8 -*-
+# pylint: disable=E0203,E1101,C0111
+"""
+@file
+@brief Runtime operator.
+"""
+import numpy
+from ..shape_object import ShapeObject
+from ._op import OpRun
+
+
+def _global_average_pool(x):
+    spatial_shape = numpy.ndim(x) - 2
+    y = numpy.average(
+        x, axis=tuple(range(2, spatial_shape + 2)))
+    for _ in range(spatial_shape):
+        y = numpy.expand_dims(y, -1)
+    return y
+
+
+class GlobalAveragePool(OpRun):
+
+    def __init__(self, onnx_node, desc=None, **options):
+        OpRun.__init__(self, onnx_node, desc=desc,
+                       **options)
+
+    def _run(self, x):  # pylint: disable=W0221
+        res = _global_average_pool(x)
+        return (res, )
+
+    def _infer_shapes(self, x):  # pylint: disable=W0221
+        shape = x.shape[:2] + (1, ) * (len(x.shape) - 2)
+        return (ShapeObject(shape, dtype=x.dtype), )