Skip to content
This repository was archived by the owner on Jan 13, 2024. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions _unittests/ut_onnxrt/test_onnxrt_python_runtime_.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from sklearn.utils.testing import ignore_warnings
from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611
OnnxAbs, OnnxAdd, OnnxArgMax, OnnxArgMin,
OnnxBatchNormalization,
OnnxConcat,
OnnxCeil, OnnxClip, OnnxConstant, OnnxConstantOfShape,
OnnxDequantizeLinear,
Expand Down Expand Up @@ -54,6 +55,7 @@
from mlprodict.tools.asv_options_helper import (
get_opset_number_from_onnx, get_ir_version_from_onnx)
from mlprodict.onnxrt.validate.validate_python import validate_python_inference
from mlprodict.onnxrt.ops_cpu.op_batch_normalization import _batchnorm_test_mode
from mlprodict.onnxrt.ops_cpu._op_onnx_numpy import ( # pylint: disable=E0611
topk_element_min_double, topk_element_max_double, topk_element_fetch_double,
topk_element_min_float, topk_element_max_float, topk_element_fetch_float,
Expand Down Expand Up @@ -393,6 +395,47 @@ def test_onnxt_runtime_argmin_12(self):
self.assertEqualArray(numpy.array([2, 1], dtype=numpy.int64),
got['Y'], decimal=6)

def test_onnxt_batch_normalization(self):
    """Checks the python runtime for operator *BatchNormalization*
    against the reference implementation ``_batchnorm_test_mode``,
    with the default epsilon and with a custom one."""
    # Fixed seed: the second scenario draws random tensors and must be
    # reproducible from one run to the next.
    rnd = numpy.random.RandomState(0)

    # input size: (1, 2, 1, 3)
    x = numpy.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(numpy.float32)
    s = numpy.array([1.0, 1.5]).astype(numpy.float32)
    bias = numpy.array([0, 1]).astype(numpy.float32)
    mean = numpy.array([0, 3]).astype(numpy.float32)
    var = numpy.array([1, 1.5]).astype(numpy.float32)
    y = _batchnorm_test_mode(x, s, bias, mean, var).astype(numpy.float32)

    onx = OnnxBatchNormalization(
        'X', s, bias, mean, var, output_names=['Y'],
        op_version=get_opset_number_from_onnx())
    model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                            target_opset=get_opset_number_from_onnx())
    oinf = OnnxInference(model_def)
    got = oinf.run({'X': x})
    self.assertEqual(list(sorted(got)), ['Y'])
    self.assertEqualArray(y, got['Y'])

    # input size: (2, 3, 4, 5), non-default epsilon
    x = rnd.randn(2, 3, 4, 5).astype(numpy.float32)
    s = rnd.randn(3).astype(numpy.float32)
    bias = rnd.randn(3).astype(numpy.float32)
    mean = rnd.randn(3).astype(numpy.float32)
    var = rnd.rand(3).astype(numpy.float32)
    epsilon = 1e-2
    y = _batchnorm_test_mode(
        x, s, bias, mean, var, epsilon).astype(numpy.float32)

    onx = OnnxBatchNormalization(
        'X', s, bias, mean, var,
        output_names=['Y'], epsilon=epsilon,
        op_version=get_opset_number_from_onnx())
    model_def = onx.to_onnx({'X': x.astype(numpy.float32)},
                            target_opset=get_opset_number_from_onnx())
    oinf = OnnxInference(model_def)
    got = oinf.run({'X': x})
    self.assertEqual(list(sorted(got)), ['Y'])
    self.assertEqualArray(y, got['Y'])
    python_tested.append(OnnxBatchNormalization)

def test_onnxt_runtime_ceil(self):
    """Checks the python runtime for operator *Ceil* against
    :func:`numpy.ceil` via the shared unary-operator helper."""
    self.common_test_onnxt_runtime_unary(OnnxCeil, numpy.ceil)

Expand Down
2 changes: 1 addition & 1 deletion mlprodict/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
@brief Ways to speed up predictions for a machine learned model.
"""

__version__ = "0.4.1215"
__version__ = "0.4.1207"
__author__ = "Xavier Dupré"


Expand Down
1 change: 1 addition & 0 deletions mlprodict/onnxrt/ops_cpu/_op_list.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from .op_argmax import ArgMax
from .op_argmin import ArgMin
from .op_array_feature_extractor import ArrayFeatureExtractor
from .op_batch_normalization import BatchNormalization
from .op_binarizer import Binarizer
from .op_cast import Cast
from .op_cdist import CDist
Expand Down
36 changes: 36 additions & 0 deletions mlprodict/onnxrt/ops_cpu/op_batch_normalization.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun


def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5):
dims_x = len(x.shape)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
mean = mean.reshape(-1, *dim_ones)
var = var.reshape(-1, *dim_ones)
return s * (x - mean) / numpy.sqrt(var + epsilon) + bias


class BatchNormalization(OpRun):
    """Python runtime for the ONNX *BatchNormalization* operator,
    evaluated in test (inference) mode through
    :func:`_batchnorm_test_mode`."""

    # Default attribute values; OpRun exposes them as instance attributes
    # (e.g. ``self.epsilon``) after reading the node's attributes.
    atts = {'epsilon': 1e-5, 'momentum': 0.9}

    def __init__(self, onnx_node, desc=None, **options):
        OpRun.__init__(self, onnx_node, desc=desc,
                       expected_attributes=BatchNormalization.atts,
                       **options)

    def _run(self, x, scale, bias, mean, var):  # pylint: disable=W0221
        # Single output: the normalized tensor.
        return (_batchnorm_test_mode(x, scale, bias, mean, var,
                                     epsilon=self.epsilon), )

    def _infer_shapes(self, x, scale, bias, mean, var):  # pylint: disable=W0221
        # The output shape matches the input shape.
        return (x, )