This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[v1.x] Update onnx support to work with onnx 1.7.0 with most CV models #19017

Status: merged on Sep 11, 2020 · 35 commits

Commits:
51715d7  fix pooling_convention warning when convert model to onnx (#18529) (HaoLiuHust, Aug 10, 2020)
7b7141b  Prevent uninitialized variable error. (Aug 18, 2020)
aa1515b  Initial work to get Dropout to work with onnx 1.7 (Aug 26, 2020)
77fb75f  Remove trailing whitespace for pylint. (Aug 26, 2020)
ae1e74d  Fix tensor initialization for Dropout operator input. (Aug 27, 2020)
0faeeef  Update Clip operator to support latest ONNX opset versions by moving … (Aug 27, 2020)
e9453c5  Fix whitespace. (Aug 27, 2020)
1d5b664  Add support for importing Dropout operator in ONNX opset version >= 12. (Aug 28, 2020)
9c5c034  Add support for import ONNX opsets >= 11 to clip operator. (Aug 28, 2020)
aabcdd5  Add optional opset_version parameter that defaults to latest opset ve… (Aug 28, 2020)
edd6f53  Add optional parameter to create_model() that allows user to specify … (Aug 28, 2020)
2dfa22f  Use opset_version argument to determine operator format. (Aug 28, 2020)
6c4e555  Add a opset_version parameter to from_onnx() so at operator conversio… (Aug 28, 2020)
7305b9d  For Clip and Dropout operators, use opset version from passed proto_o… (Aug 28, 2020)
39da0fc  Use same tolerances that are in master. (Aug 31, 2020)
e36c200  Change Pad operator to use inputs instead of attributes for newer ops… (Sep 1, 2020)
e4a9318  Add documentation opset_version parameter. (Sep 1, 2020)
85a0ea6  Add opset_version parameters to unit tests. (Sep 1, 2020)
0738620  Add test script for testing inference with onnxruntime on CV models f… (Sep 1, 2020)
885862d  Add license and clean up imports. (Sep 2, 2020)
9bb2b47  Install onnxruntime in docker container for unit tests. (Sep 2, 2020)
50d929c  Add onnxruntime to test dependencies. (Sep 2, 2020)
a6e6967  Install onnxruntime into CentOS docker image. (Sep 2, 2020)
0bfec8e  Disable testing squeezenet models for now. (Sep 2, 2020)
26708e3  Update onnx version. (Sep 2, 2020)
d620548  Fix typo. (Sep 2, 2020)
c7b55c1  Use mx.image.imread instead of PIL module. (Sep 2, 2020)
f49e47a  ONNX import: use Conv pad attribute for symmetrical padding (#18675) (Kh4L, Jul 24, 2020)
36d92ca  Install onnx in CentOS containers when installing python. (Sep 2, 2020)
b102f78  Update import and export of some ONNX ops to support newer opset vers… (Sep 3, 2020)
8bd6a64  Re-enable squeezenet model testings in onnxruntime. (Sep 3, 2020)
a3ea851  Run the onnxruntime inference tests in the ONNX pipeline instead of n… (Sep 3, 2020)
a5246fe  Add missed return value. (Sep 3, 2020)
29dcdf3  Refactor code based on review comment. (Sep 8, 2020)
d597b5a  Since the onnx tests are only run on ubuntu_cpu images, we don't need… (Sep 8, 2020)
Files changed:
4 changes: 2 additions & 2 deletions ci/docker/install/ubuntu_onnx.sh
@@ -30,5 +30,5 @@ echo "Installing libprotobuf-dev and protobuf-compiler ..."
 apt-get update || true
 apt-get install -y libprotobuf-dev protobuf-compiler
 
-echo "Installing pytest, pytest-cov, protobuf, Pillow, ONNX and tabulate ..."
-pip3 install pytest==3.6.3 pytest-cov==2.5.1 protobuf==3.5.2 onnx==1.3.0 Pillow==5.0.0 tabulate==0.7.5
+echo "Installing pytest, pytest-cov, protobuf, Pillow, ONNX, tabulate and onnxruntime..."
+pip3 install pytest==3.6.3 pytest-cov==2.5.1 protobuf==3.5.2 onnx==1.7.0 Pillow==5.0.0 tabulate==0.7.5 onnxruntime==1.4.0
4 changes: 3 additions & 1 deletion ci/docker/runtime_functions.sh
@@ -1228,11 +1228,13 @@ unittest_centos7_gpu() {
 integrationtest_ubuntu_cpu_onnx() {
     set -ex
     export PYTHONPATH=./python/
-    export DMLC_LOG_STACK_TRACE_DEPTH=10
+    export MXNET_SUBGRAPH_VERBOSE=0
+    export DMLC_LOG_STACK_TRACE_DEPTH=10
     tests/python-pytest/onnx/backend_test.py
     pytest tests/python-pytest/onnx/mxnet_export_test.py
     pytest tests/python-pytest/onnx/test_models.py
     pytest tests/python-pytest/onnx/test_node.py
+    pytest tests/python-pytest/onnx/test_onnxruntime.py
 }
 
 integrationtest_ubuntu_gpu_python() {
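The new test_onnxruntime.py stage checks exported models end to end with onnxruntime. A minimal sketch of that round trip, with hypothetical checkpoint names (the actual test script covers a range of CV models):

import numpy as np
import onnxruntime
from mxnet.contrib import onnx as onnx_mxnet

# Export an MXNet checkpoint (hypothetical file names) to ONNX.
input_shape = (1, 3, 224, 224)
onnx_file = onnx_mxnet.export_model("model-symbol.json", "model-0000.params",
                                    [input_shape], np.float32, "model.onnx")

# Run a random input through the exported graph with onnxruntime.
session = onnxruntime.InferenceSession(onnx_file)
data = np.random.uniform(0, 1, input_shape).astype(np.float32)
input_name = session.get_inputs()[0].name
outputs = session.run(None, {input_name: data})
print(outputs[0].shape)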
246 changes: 181 additions & 65 deletions python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -175,7 +175,7 @@ def convert_weights_and_inputs(node, **kwargs):
                 data_type=data_type,
                 dims=dims,
                 vals=np_arr.flatten().tolist(),
-                raw=False,
+                raw=False
             )
         )
 
@@ -462,36 +462,73 @@ def convert_pad(node, **kwargs):
     """Map MXNet's pad operator attributes to onnx's Pad operator
     and return the created node.
     """
+    opset_version = kwargs["opset_version"]
     name, input_nodes, attrs = get_inputs(node, kwargs)
 
     mxnet_pad_width = convert_string_to_list(attrs.get("pad_width"))
     onnx_pad_width = transform_padding(mxnet_pad_width)
 
     pad_mode = attrs.get("mode")
+    pad_value = np.float32(attrs.get("constant_value", 0.0))
 
-    if pad_mode == "constant":
-        pad_value = float(attrs.get("constant_value")) \
-            if "constant_value" in attrs else 0.0
-        node = onnx.helper.make_node(
-            'Pad',
-            inputs=input_nodes,
-            outputs=[name],
-            mode='constant',
-            value=pad_value,
-            pads=onnx_pad_width,
-            name=name
-        )
+    if opset_version >= 11:
+        # starting with opset 11, pads and constant_value are inputs instead of attributes
+        from onnx.helper import make_tensor, make_tensor_value_info
+        initializer = kwargs["initializer"]
+        pads_input_name = name + "_pads"
+        pads_input_type = onnx.TensorProto.INT64
+        pads_input_shape = np.shape(np.array(onnx_pad_width))
+        pads_value_node = make_tensor_value_info(pads_input_name, pads_input_type, pads_input_shape)
+        pads_tensor_node = make_tensor(pads_input_name, pads_input_type, pads_input_shape, onnx_pad_width)
+        initializer.append(pads_tensor_node)
+        input_nodes.append(pads_input_name)
+
+        if pad_mode == "constant":
+            const_input_name = name + "_constant"
+            const_input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[pad_value.dtype]
+            const_value_node = make_tensor_value_info(const_input_name, const_input_type, ())
+            const_tensor_node = make_tensor(const_input_name, const_input_type, (), [pad_value])
+            initializer.append(const_tensor_node)
+            input_nodes.append(const_input_name)
+            pad_node = onnx.helper.make_node(
+                "Pad",
+                input_nodes,
+                [name],
+                mode=pad_mode,
+                name=name
+            )
+            return [pads_value_node, const_value_node, pad_node]
+        else:
+            pad_node = onnx.helper.make_node(
+                "Pad",
+                input_nodes,
+                [name],
+                mode=pad_mode,
+                name=name
+            )
+            return [pads_value_node, pad_node]
     else:
-        node = onnx.helper.make_node(
-            'Pad',
-            inputs=input_nodes,
-            outputs=[name],
-            mode=pad_mode,
-            pads=onnx_pad_width,
-            name=name
-        )
-
-    return [node]
+        if pad_mode == "constant":
+            node = onnx.helper.make_node(
+                'Pad',
+                inputs=input_nodes,
+                outputs=[name],
+                mode='constant',
+                value=pad_value,
+                pads=onnx_pad_width,
+                name=name
+            )
+            return [node]
+        else:
+            node = onnx.helper.make_node(
+                'Pad',
+                inputs=input_nodes,
+                outputs=[name],
+                mode=pad_mode,
+                pads=onnx_pad_width,
+                name=name
+            )
+            return [node]
 
 
 def create_helper_trans_node(op_name, input_node, node_name):
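The motivation for the new branch is the operator's changed contract: from opset 11 on, Pad reads pads (and an optional constant value) from inputs rather than attributes, so the converter registers them as initializers. A toy standalone sketch of the opset-11 form, with hypothetical tensor names:

import onnx
from onnx import helper, TensorProto

# Pad a 1x2 tensor with one trailing column; pads is an initializer input,
# not an attribute, in the opset-11 form. Layout: (x1_begin, x2_begin, x1_end, x2_end).
pads_init = helper.make_tensor("pads", TensorProto.INT64, (4,), [0, 0, 0, 1])
node = helper.make_node("Pad", ["x", "pads"], ["y"], mode="constant")
graph = helper.make_graph(
    [node], "pad_example",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, (1, 2))],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, (1, 3))],
    initializer=[pads_init],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 11)])
onnx.checker.check_model(model)   # passes: pads travels as an input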
@@ -639,6 +676,7 @@ def convert_pooling(node, **kwargs):
     MaxPool/AveragePool/GlobalMaxPool/GlobalAveragePool operators
     based on the input node's attributes and return the created node.
     """
+    opset_version = kwargs["opset_version"]
     name, input_nodes, attrs = get_inputs(node, kwargs)
 
     kernel = eval(attrs["kernel"])
@@ -648,13 +686,14 @@ def convert_pooling(node, **kwargs):
     p_value = attrs.get('p_value', 'None')
 
     pooling_convention = attrs.get('pooling_convention', 'valid')
-
+    ceil_mode = False
     if pooling_convention == 'full':
-        pooling_warning = "Pooling: ONNX currently doesn't support pooling_convention. " \
-                          "This might lead to shape or accuracy issues. " \
-                          "https://github.com/onnx/onnx/issues/549"
-
-        logging.warning(pooling_warning)
+        if opset_version < 10:
+            pooling_warning = "Pooling: ONNX lower than 1.5.0 doesn't support pooling_convention. " \
+                              "This might lead to shape or accuracy issues. " \
+                              "https://github.com/onnx/onnx/issues/549"
+            logging.warning(pooling_warning)
+        ceil_mode = True
 
     pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
     pad_dims = pad_dims + pad_dims
@@ -694,15 +733,27 @@ def convert_pooling(node, **kwargs):
             name=name
         )
     else:
-        node = onnx.helper.make_node(
-            pool_types[pool_type],
-            input_nodes,  # input
-            [name],
-            kernel_shape=kernel,
-            pads=pad_dims,
-            strides=stride,
-            name=name
-        )
+        if opset_version >= 10:
+            node = onnx.helper.make_node(
+                pool_types[pool_type],
+                input_nodes,  # input
+                [name],
+                kernel_shape=kernel,
+                pads=pad_dims,
+                strides=stride,
+                name=name,
+                ceil_mode=ceil_mode
+            )
+        else:
+            node = onnx.helper.make_node(
+                pool_types[pool_type],
+                input_nodes,  # input
+                [name],
+                kernel_shape=kernel,
+                pads=pad_dims,
+                strides=stride,
+                name=name
+            )
 
     return [node]
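For context, MXNet's 'full' pooling convention rounds the output length up, which opset 10 and later can express directly through the ceil_mode attribute; older opsets can only truncate, hence the retained warning. A quick check of the arithmetic:

import math

# Output length of one pooled axis: 'valid' truncates (ceil_mode=0),
# 'full' rounds up (ceil_mode=1). Input 7, kernel 2, stride 2, no padding:
n, k, s, p = 7, 2, 2, 0
print(math.floor((n + 2 * p - k) / s) + 1)   # 3 -> pooling_convention='valid'
print(math.ceil((n + 2 * p - k) / s) + 1)    # 4 -> pooling_convention='full'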

@@ -945,17 +996,35 @@ def convert_dropout(node, **kwargs):
     and return the created node.
     """
     name, input_nodes, attrs = get_inputs(node, kwargs)
+    opset_version = kwargs["opset_version"]
 
     probability = float(attrs.get("p", 0.5))
 
-    dropout_node = onnx.helper.make_node(
-        "Dropout",
-        input_nodes,
-        [name],
-        ratio=probability,
-        name=name
-    )
-    return [dropout_node]
+    if opset_version >= 12:
+        # opset >= 12 requires the ratio to be an input
+        initializer = kwargs["initializer"]
+        ratio_input_name = name + "_ratio"
+        value_node = onnx.helper.make_tensor_value_info(ratio_input_name,
+                                                        onnx.TensorProto.FLOAT, ())
+        tensor_node = onnx.helper.make_tensor(ratio_input_name, onnx.TensorProto.FLOAT,
+                                              (), [probability])
+        initializer.append(tensor_node)
+        dropout_node = onnx.helper.make_node(
+            "Dropout",
+            [input_nodes[0], ratio_input_name],
+            [name],
+            name=name
+        )
+        return [value_node, dropout_node]
+    else:
+        dropout_node = onnx.helper.make_node(
+            "Dropout",
+            input_nodes,
+            [name],
+            ratio=probability,
+            name=name
+        )
+        return [dropout_node]
 
 
 @mx_op.register("Flatten")
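Dropout follows the same pattern: from opset 12 the ratio is a tensor input rather than an attribute. A toy sketch of the new form, with hypothetical names:

import onnx
from onnx import helper, TensorProto

# Opset-12 Dropout: the ratio is the second input, supplied as an initializer.
ratio = helper.make_tensor("ratio", TensorProto.FLOAT, (), [0.5])
node = helper.make_node("Dropout", ["x", "ratio"], ["y"])
graph = helper.make_graph(
    [node], "dropout_example",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, (1, 4))],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, (1, 4))],
    initializer=[ratio],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 12)])
onnx.checker.check_model(model)   # a ratio attribute would be rejected at opset 12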
@@ -971,19 +1040,46 @@ def convert_clip(node, **kwargs):
     and return the created node.
     """
     name, input_nodes, attrs = get_inputs(node, kwargs)
+    opset_version = kwargs["opset_version"]
 
-    a_min = np.float(attrs.get('a_min', -np.inf))
-    a_max = np.float(attrs.get('a_max', np.inf))
+    a_min = float(attrs.get('a_min', -np.inf))
+    a_max = float(attrs.get('a_max', np.inf))
 
-    clip_node = onnx.helper.make_node(
-        "Clip",
-        input_nodes,
-        [name],
-        name=name,
-        min=a_min,
-        max=a_max
-    )
-    return [clip_node]
+    if opset_version >= 11:
+        # opset >= 11 requires min/max to be inputs
+        initializer = kwargs["initializer"]
+        min_input_name = name + "_min"
+        max_input_name = name + "_max"
+        min_value_node = onnx.helper.make_tensor_value_info(min_input_name,
+                                                            onnx.TensorProto.FLOAT, ())
+        max_value_node = onnx.helper.make_tensor_value_info(max_input_name,
+                                                            onnx.TensorProto.FLOAT, ())
+        min_tensor_node = onnx.helper.make_tensor(min_input_name, onnx.TensorProto.FLOAT,
+                                                  (), [a_min])
+        max_tensor_node = onnx.helper.make_tensor(max_input_name, onnx.TensorProto.FLOAT,
+                                                  (), [a_max])
+        initializer.append(min_tensor_node)
+        initializer.append(max_tensor_node)
+        input_nodes.append(min_input_name)
+        input_nodes.append(max_input_name)
+        clip_node = onnx.helper.make_node(
+            "Clip",
+            input_nodes,
+            [name],
+            name=name
+        )
+        return [min_value_node, max_value_node, clip_node]
+    else:
+        clip_node = onnx.helper.make_node(
+            "Clip",
+            input_nodes,
+            [name],
+            name=name,
+            min=a_min,
+            max=a_max
+        )
+        return [clip_node]
 
 
 def scalar_op_helper(node, op_name, **kwargs):
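Clip changed the same way in opset 11, where min and max become inputs. A toy sketch, run through onnxruntime to confirm the clamping, with hypothetical names:

import numpy as np
import onnx
import onnxruntime
from onnx import helper, TensorProto

# Opset-11 Clip: min/max are inputs (initializers here), not attributes.
inits = [helper.make_tensor("min", TensorProto.FLOAT, (), [0.0]),
         helper.make_tensor("max", TensorProto.FLOAT, (), [6.0])]
node = helper.make_node("Clip", ["x", "min", "max"], ["y"])
graph = helper.make_graph(
    [node], "clip_example",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, (4,))],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, (4,))],
    initializer=inits,
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 11)])
session = onnxruntime.InferenceSession(model.SerializeToString())
x = np.array([-1.0, 3.0, 7.0, 100.0], dtype=np.float32)
print(session.run(None, {"x": x})[0])   # [0. 3. 6. 6.]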
@@ -2070,14 +2166,34 @@ def convert_topk(node, **kwargs):
     else:
         raise NotImplementedError("ONNX expects both value and indices as output")
 
-    topk_node = onnx.helper.make_node(
-        "TopK",
-        input_nodes,
-        outputs,
-        axis=axis,
-        k=k,
-        name=name
-    )
+    opset_version = kwargs['opset_version']
+    if opset_version >= 10:
+        from onnx.helper import make_tensor, make_tensor_value_info
+        initializer = kwargs["initializer"]
+        k_input_name = name + "_k"
+        k_input_type = onnx.TensorProto.INT64
+        k_value_node = make_tensor_value_info(k_input_name, k_input_type, ())
+        k_tensor_node = make_tensor(k_input_name, k_input_type, (), k)
+        initializer.append(k_tensor_node)
+        input_nodes.append(k_input_name)
+
+        topk_node = onnx.helper.make_node(
+            "TopK",
+            input_nodes,
+            outputs,
+            axis=axis,
+            name=name
+        )
+        return [k_value_node, topk_node]
+    else:
+        topk_node = onnx.helper.make_node(
+            "TopK",
+            input_nodes,
+            outputs,
+            axis=axis,
+            k=k,
+            name=name
+        )
 
     return [topk_node]
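TopK made the equivalent change in opset 10: k arrives as an int64 tensor input (the spec describes it as 1-D with a single element) instead of an attribute. A toy sketch with hypothetical names:

import numpy as np
import onnx
import onnxruntime
from onnx import helper, TensorProto

# Opset-10 TopK: k is a 1-D int64 input holding a single value.
k_init = helper.make_tensor("k", TensorProto.INT64, (1,), [2])
node = helper.make_node("TopK", ["x", "k"], ["values", "indices"], axis=-1)
graph = helper.make_graph(
    [node], "topk_example",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, (1, 4))],
    [helper.make_tensor_value_info("values", TensorProto.FLOAT, (1, 2)),
     helper.make_tensor_value_info("indices", TensorProto.INT64, (1, 2))],
    initializer=[k_init],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 10)])
session = onnxruntime.InferenceSession(model.SerializeToString())
x = np.array([[1.0, 4.0, 2.0, 3.0]], dtype=np.float32)
values, indices = session.run(None, {"x": x})
print(values, indices)   # [[4. 3.]] [[1 3]]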
10 changes: 7 additions & 3 deletions python/mxnet/contrib/onnx/mx2onnx/export_model.py
@@ -29,7 +29,7 @@
 
 
 def export_model(sym, params, input_shape, input_type=np.float32,
-                 onnx_file_path='model.onnx', verbose=False):
+                 onnx_file_path='model.onnx', verbose=False, opset_version=None):
     """Exports the MXNet model file, passed as a parameter, into ONNX model.
     Accepts both symbol,parameter objects as well as json and params filepaths as input.
     Operator support and coverage -
@@ -63,11 +63,15 @@ def export_model(sym, params, input_shape, input_type=np.float32,
 
     try:
         from onnx import helper, mapping
+        from onnx.defs import onnx_opset_version
     except ImportError:
         raise ImportError("Onnx and protobuf need to be installed. "
                           + "Instructions to install - https://github.com/onnx/onnx")
 
     converter = MXNetGraph()
+    if opset_version is None:
+        # default is to use latest opset version the onnx package supports
+        opset_version = onnx_opset_version()
 
     data_format = np.dtype(input_type)
     # if input parameters are strings(file paths), load files and create symbol parameter objects
@@ -76,11 +80,11 @@ def export_model(sym, params, input_shape, input_type=np.float32,
         sym_obj, params_obj = load_module(sym, params)
         onnx_graph = converter.create_onnx_graph_proto(sym_obj, params_obj, input_shape,
                                                        mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
-                                                       verbose=verbose)
+                                                       verbose=verbose, opset_version=opset_version)
     elif isinstance(sym, symbol.Symbol) and isinstance(params, dict):
         onnx_graph = converter.create_onnx_graph_proto(sym, params, input_shape,
                                                        mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
-                                                       verbose=verbose)
+                                                       verbose=verbose, opset_version=opset_version)
     else:
         raise ValueError("Input sym and params should either be files or objects")
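With this change callers can pin the target operator set, and when opset_version is omitted the exporter defaults to whatever the installed onnx package reports. A usage sketch with hypothetical checkpoint names (under onnx 1.7.0 the default resolves to opset 12):

import numpy as np
from onnx.defs import onnx_opset_version
from mxnet.contrib import onnx as onnx_mxnet

print(onnx_opset_version())   # 12 with onnx 1.7.0 installed

# Hypothetical checkpoint names; pin opset 11 explicitly if the consumer
# of the exported model does not support opset 12 yet.
onnx_mxnet.export_model("resnet-symbol.json", "resnet-0000.params",
                        [(1, 3, 224, 224)], np.float32, "resnet.onnx",
                        opset_version=11)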