54 changes: 53 additions & 1 deletion _unittests/ut_onnxrt/test_cpu_ops.py
@@ -55,7 +55,7 @@ def test_cpu_conv_init(self):
        W = numpy.random.rand(24, 96, 1, 1).astype(numpy.float32)

        onx = OnnxConv(
-           'X', W, output_names=['Y'],
+           'X', 'W', output_names=['Y'],
            auto_pad='NOTSET', group=1, dilations=[1, 1],
            kernel_shape=[1, 1], pads=[0, 0, 0, 0], strides=[1, 1],
            op_version=get_opset_number_from_onnx())
@@ -80,6 +80,58 @@ def test_cpu_conv_init(self):
                            ii, diff[ii], gotrt['Y'].ravel()[ii], got['Y'].ravel()[ii]))
        self.assertEqualArray(gotrt['Y'], got['Y'], decimal=5)

    def test_cpu_conv_group(self):
        x = numpy.random.rand(1, 3, 3, 4).astype(numpy.float32)
        W = numpy.random.rand(9, 1, 3, 3).astype(numpy.float32)

        onx = OnnxConv(
            'X', 'W', output_names=['Y'],
            auto_pad='NOTSET', group=3, dilations=[1, 1],
            kernel_shape=[3, 3], strides=[1, 1],
            op_version=get_opset_number_from_onnx())
        model_def = onx.to_onnx({'X': x.astype(numpy.float32),
                                 'W': W.astype(numpy.float32)},
                                target_opset=get_opset_number_from_onnx())
        oinf = OnnxInference(model_def)
        oinfrt = OnnxInference(model_def, runtime='onnxruntime1')
        d = oinf.sequence_[-1].ops_.atts_value
        self.assertIsInstance(d, dict)
        self.assertEqual(d['kernel_shape'].tolist(), [3, 3])

        xs = [
            numpy.random.rand(1, 3, 3, 4).astype(numpy.float32),
            numpy.array([1.0, 4.0, 7.0, 10.0, 13.0, 16.0, 19.0, 22.0, 25.0, 28.0, 31.0,
                         34.0, 2.0, 5.0, 8.0, 11.0, 14.0, 17.0, 20.0, 23.0, 26.0, 29.0,
                         32.0, 35.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0,
                         30.0, 33.0, 36.0], dtype=numpy.float32).reshape((1, 3, 3, 4))]
        Ws = [
            numpy.random.rand(9, 1, 3, 3).astype(numpy.float32),
            numpy.array([1.0, 10.0, 19.0, 28.0, 37.0, 46.0, 55.0, 64.0,
                         73.0, 2.0, 11.0, 20.0, 29.0, 38.0, 47.0, 56.0, 65.0, 74.0,
                         3.0, 12.0, 21.0, 30.0, 39.0, 48.0, 57.0, 66.0, 75.0, 4.0,
                         13.0, 22.0, 31.0, 40.0, 49.0, 58.0, 67.0, 76.0, 5.0, 14.0,
                         23.0, 32.0, 41.0, 50.0, 59.0, 68.0, 77.0, 6.0, 15.0, 24.0,
                         33.0, 42.0, 51.0, 60.0, 69.0, 78.0, 7.0, 16.0, 25.0, 34.0,
                         43.0, 52.0, 61.0, 70.0, 79.0, 8.0, 17.0, 26.0, 35.0, 44.0,
                         53.0, 62.0, 71.0, 80.0, 9.0, 18.0, 27.0, 36.0, 45.0, 54.0,
                         63.0, 72.0, 81.0], dtype=numpy.float32).reshape((9, 1, 3, 3))]

        for x, W in zip(xs, Ws):
            x = numpy.asfortranarray(x)
            W = numpy.asfortranarray(W)
            got = oinf.run({'X': x, 'W': W})
            gotrt = oinfrt.run({'X': x, 'W': W})
            diff = list(numpy.abs((gotrt['Y'] - got['Y']).ravel()))
            sdiff = list(sorted(diff))
            if sdiff[-1] > 1e-5:
                raise AssertionError("runtimes disagree {}".format(sdiff[-5:]))
            for ii in range(len(diff)):  # pylint: disable=C0200
                if numpy.isnan(diff[ii]):
                    raise AssertionError(
                        "runtimes disagree about nan {}: {} # {} ? {}".format(
                            ii, diff[ii], gotrt['Y'].ravel()[ii], got['Y'].ravel()[ii]))
            self.assertEqualArray(gotrt['Y'], got['Y'], decimal=5)


if __name__ == "__main__":
unittest.main()
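For readers checking the expected shapes in the new test: with group=3, the Conv node splits the 3 input channels into 3 groups of one channel each, and W of shape (9, 1, 3, 3) assigns 3 filters to each group, so Y has shape (1, 9, 1, 2) for the (1, 3, 3, 4) input. Below is a minimal, self-contained numpy sketch of that arithmetic; grouped_conv2d is a hypothetical reference helper written here for illustration, not part of mlprodict.

import numpy

def grouped_conv2d(x, w, group):
    # x: (N, C, H, W), w: (M, C // group, kH, kW); no padding, stride 1,
    # matching the attributes of the Conv node built in the test above.
    n, c, h, win = x.shape
    m, cg, kh, kw = w.shape
    oh, ow = h - kh + 1, win - kw + 1
    mg = m // group                         # output channels per group
    y = numpy.zeros((n, m, oh, ow), dtype=x.dtype)
    for g in range(group):
        xg = x[:, g * cg:(g + 1) * cg]      # input channels of group g
        wg = w[g * mg:(g + 1) * mg]         # filters of group g
        for i in range(oh):
            for j in range(ow):
                patch = xg[:, :, i:i + kh, j:j + kw]
                y[:, g * mg:(g + 1) * mg, i, j] = numpy.tensordot(
                    patch, wg, axes=([1, 2, 3], [1, 2, 3]))
    return y

x = numpy.random.rand(1, 3, 3, 4).astype(numpy.float32)
W = numpy.random.rand(9, 1, 3, 3).astype(numpy.float32)
print(grouped_conv2d(x, W, group=3).shape)  # (1, 9, 1, 2)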
8 changes: 8 additions & 0 deletions mlprodict/onnxrt/ops_cpu/_op.py
@@ -274,6 +274,14 @@ def _to_python_numpy(self, inputs, numpy_name):
        return ("import numpy",
                "return numpy.%s(%s)" % (numpy_name, ", ".join(inputs)))

    @property
    def atts_value(self):
        "Returns all parameters in a dictionary."
        if hasattr(self, 'atts'):
            return {k: getattr(self, k)
                    for k in self.atts}  # pylint: disable=E1101
        return None


class OpRunUnary(OpRun):
"""
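To show what the new atts_value property returns, here is a small self-contained sketch. OpRunConv is a hypothetical stand-in for an OpRun subclass (real operators declare their ONNX attributes in a class-level atts mapping, which is what the property iterates); the property body is copied from the diff above.

import numpy

class OpRunConv:
    # Hypothetical stand-in: atts lists the attribute names (here just
    # two of Conv's), as iterated by the atts_value property.
    atts = {'kernel_shape': None, 'group': None}

    def __init__(self):
        self.kernel_shape = numpy.array([3, 3], dtype=numpy.int64)
        self.group = 3

    @property
    def atts_value(self):
        "Returns all parameters in a dictionary."
        if hasattr(self, 'atts'):
            return {k: getattr(self, k) for k in self.atts}
        return None

op = OpRunConv()
print(op.atts_value)  # {'kernel_shape': array([3, 3]), 'group': 3}
# This is what test_cpu_conv_group asserts through
# oinf.sequence_[-1].ops_.atts_value.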
8 changes: 6 additions & 2 deletions mlprodict/onnxrt/ops_cpu/op_conv_.cpp
@@ -44,7 +44,9 @@ class Conv {
              py::array_t<int64_t> pads,
              py::array_t<int64_t> strides);

-    py::array_t<T> compute(py::array_t<T> X, py::array_t<T> W, py::array_t<T> B) const;
+    py::array_t<T> compute(py::array_t<T, py::array::c_style | py::array::forcecast> X,
+                           py::array_t<T, py::array::c_style | py::array::forcecast> W,
+                           py::array_t<T, py::array::c_style | py::array::forcecast> B) const;

private:

@@ -117,7 +119,9 @@ void Conv<T>::compute_kernel_shape(const std::vector<int64_t>& weight_shape,


template<typename T>
-py::array_t<T> Conv<T>::compute(py::array_t<T> X, py::array_t<T> W, py::array_t<T> B) const {
+py::array_t<T> Conv<T>::compute(py::array_t<T, py::array::c_style | py::array::forcecast> X,
+                                py::array_t<T, py::array::c_style | py::array::forcecast> W,
+                                py::array_t<T, py::array::c_style | py::array::forcecast> B) const {

    std::vector<int64_t> x_dims;
    arrayshape2vector(x_dims, X);
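This signature change is the substance of the fix: by requesting py::array::c_style | py::array::forcecast, pybind11 converts any incoming buffer, including the Fortran-ordered arrays the new test feeds in via numpy.asfortranarray, to a C-contiguous array of T before the kernel reads it, instead of letting the kernel index a column-major buffer as if it were row-major. A numpy-only sketch of the layout difference follows; the numpy calls are standard, and mapping ascontiguousarray to the pybind11 flags is my reading of their documented behavior, not code from this PR.

import numpy

x = numpy.arange(12, dtype=numpy.float32).reshape((3, 4))
xf = numpy.asfortranarray(x)     # same values, column-major memory layout

print(x.strides, xf.strides)                              # (16, 4) vs (4, 12)
print(x.flags['C_CONTIGUOUS'], xf.flags['C_CONTIGUOUS'])  # True False

# A kernel walking the raw buffer row by row would read xf in the wrong
# order.  numpy.ascontiguousarray plays the role that c_style | forcecast
# plays on the pybind11 side: it hands over a C-contiguous copy.
xc = numpy.ascontiguousarray(xf)
assert xc.flags['C_CONTIGUOUS'] and (xc == x).all()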