37 changes: 35 additions & 2 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
@@ -29,7 +29,7 @@
OnnxDiv,
OnnxEinsum, OnnxEqual, OnnxErf, OnnxExp, OnnxEyeLike,
OnnxFlatten, OnnxFloor,
OnnxGreater, OnnxGemm,
OnnxGreater, OnnxGemm, OnnxGlobalAveragePool,
OnnxIdentity, OnnxIsNaN,
OnnxLog, OnnxLpNormalization,
OnnxMatMul, OnnxMax, OnnxMean, OnnxMin, OnnxMul,
@@ -56,6 +56,7 @@
get_opset_number_from_onnx, get_ir_version_from_onnx)
from mlprodict.onnxrt.validate.validate_python import validate_python_inference
from mlprodict.onnxrt.ops_cpu.op_batch_normalization import _batchnorm_test_mode
from mlprodict.onnxrt.ops_cpu.op_global_average_pool import _global_average_pool
from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611
topk_element_min_double, topk_element_max_double, topk_element_fetch_double,
topk_element_min_float, topk_element_max_float, topk_element_fetch_float,
@@ -395,7 +396,7 @@ def test_onnxt_runtime_argmin_12(self):
self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64),
got['Y'], decimal=6)

def test_onnxt_batch_normalization(self):
def test_onnxt_runtime_batch_normalization(self):
# input size: (1, 2, 1, 3)
x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
s = numpy.array([1.0, 1.5]).astype(numpy.float32)
@@ -899,6 +900,38 @@ def do_test_onnxt_runtime_gemm(self, runtime):
self.assertEqualArray(numpy.dot(X, idi.T) +
cst, got['Y'], decimal=6)

def test_onnxt_runtime_global_average_pool(self):
        x = numpy.random.randn(1, 3, 5, 5).astype(numpy.float32)
y = _global_average_pool(x).astype(numpy.float32)

onx = OnnxGlobalAveragePool(
'X', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y, got['Y'])

x = numpy.array([[[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]]]).astype(numpy.float32)
y = numpy.array([[[[5]]]]).astype(numpy.float32)
onx = OnnxGlobalAveragePool(
'X', output_names=['Y'],
op_version=get_opset_number_from_onnx())
model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
target_opset=get_opset_number_from_onnx())
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})
self.assertEqual(list(sorted(got)), ['Y'])
self.assertEqualArray(y, got['Y'])

python_tested.append(OnnxGlobalAveragePool)

def test_onnxt_runtime_greater(self):
self.common_test_onnxt_runtime_binary(OnnxGreater, numpy.greater)

2 changes: 1 addition & 1 deletion mlprodict/__init__.py
@@ -4,7 +4,7 @@
@brief Ways to speed up predictions for a machine learned model.
"""

__version__ = "0.4.1207"
__version__ = "0.4.1208"
__author__ = "Xavier Dupré"


1 change: 1 addition & 0 deletions mlprodict/onnxrt/ops_cpu/_op_list.py
@@ -36,6 +36,7 @@
from .op_gather import Gather
from .op_gather_elements import GatherElements
from .op_gemm import Gemm
from .op_global_average_pool import GlobalAveragePool
from .op_greater import Greater
from .op_floor import Floor
from .op_identity import Identity
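Importing the class into `_op_list.py` is what makes the new operator visible to the python runtime: each ONNX operator is implemented by one class whose name matches the ONNX op type. As an illustration only (not the actual mlprodict dispatch code), a name-to-class registry over such a module could be built as below; `build_registry` and the commented usage are hypothetical.

# Illustrative sketch: turn a module that imports one class per ONNX
# operator into an op-type -> class mapping. Names are hypothetical;
# mlprodict's real dispatch may differ.
import inspect

def build_registry(module, base_class):
    registry = {}
    for name, obj in inspect.getmembers(module, inspect.isclass):
        # keep only runtime operator classes; the class name matches
        # the ONNX op type, e.g. 'GlobalAveragePool'
        if issubclass(obj, base_class) and obj is not base_class:
            registry[name] = obj
    return registry

# hypothetical usage:
# from mlprodict.onnxrt.ops_cpu import _op_list, _op
# registry = build_registry(_op_list, _op.OpRun)
# registry['GlobalAveragePool']  # -> the class added in this PR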
33 changes: 33 additions & 0 deletions mlprodict/onnxrt/ops_cpu/op_global_average_pool.py
@@ -0,0 +1,33 @@
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ..shape_object import ShapeObject
from ._op import OpRun


def _global_average_pool(x):
    # average over every spatial axis (all axes after batch and channels),
    # then restore them as size-1 dimensions
    spatial_axes = tuple(range(2, numpy.ndim(x)))
    y = numpy.average(x, axis=spatial_axes)
    for _ in spatial_axes:
        y = numpy.expand_dims(y, -1)
    return y


class GlobalAveragePool(OpRun):

def __init__(self, onnx_node, desc=None, **options):
OpRun.__init__(self, onnx_node, desc=desc,
**options)

def _run(self, x): # pylint: disable=W0221
res = _global_average_pool(x)
return (res, )

def _infer_shapes(self, x): # pylint: disable=W0221
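        # the output keeps the batch and channel dimensions,
        # every spatial dimension collapses to 1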
shape = x.shape[:2] + (1, ) * (len(x.shape) - 2)
return (ShapeObject(shape, dtype=x.dtype), )
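For reference, a minimal end-to-end sketch of exercising the new runtime operator, mirroring the unit test above. It assumes the `Onnx*` graph-builder classes come from `skl2onnx.algebra.onnx_ops` and `OnnxInference` from `mlprodict.onnxrt`, as in the existing test suite; adjust the imports to your setup.

import numpy
from skl2onnx.algebra.onnx_ops import OnnxGlobalAveragePool
from mlprodict.onnxrt import OnnxInference

# build a one-node graph Y = GlobalAveragePool(X)
x = numpy.random.randn(1, 3, 5, 5).astype(numpy.float32)
node = OnnxGlobalAveragePool('X', output_names=['Y'])
model_def = node.to_onnx({'X': x})

# run it with the pure-python runtime implemented in this PR
oinf = OnnxInference(model_def)
got = oinf.run({'X': x})

# the result matches a plain numpy reduction over the spatial axes
expected = numpy.mean(x, axis=(2, 3), keepdims=True)
assert numpy.allclose(got['Y'], expected, atol=1e-5)

The numpy one-liner `numpy.mean(x, axis=tuple(range(2, x.ndim)), keepdims=True)` is the reference behaviour the `_global_average_pool` helper reproduces.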