Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add backward conversions from 18->17 for reduce ops #5606

Merged
merged 20 commits into from Sep 26, 2023
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
5 changes: 4 additions & 1 deletion docs/AddNewOp.md
Expand Up @@ -74,7 +74,10 @@ Once the criteria of proposing new operator/function has been satisfied, you wil
1. The testing examples will be extracted to the doc.
2. We also generate binary data for it.
3. Example: [onnx/backend/test/case/node/abs.py](/onnx/backend/test/case/node/abs.py)
5. Add at least one automatic upgrade test for your operator in [onnx/test/automatic_upgrade_test.py](/onnx/test/automatic_upgrade_test.py) using `_test_op_upgrade`. These tests create a given operator at a given opset version (usually the version the operator was introduced in) and test that the version converter is able to convert them to the highest available version. So for a new operator `_test_op_upgrade` will not test anything, but as soon as the operator gets updated in a future opset the test will automatically become nontrivial.
5. Write upgrade and downgrade tests:
1. Add at least one automatic upgrade test for your operator in [onnx/test/automatic_upgrade_test.py](/onnx/test/automatic_upgrade_test.py) using `_test_op_upgrade`. These tests create a given operator at a given opset version (usually the version the operator was introduced in) and test that the version converter is able to convert them to the highest available version. So for a new operator `_test_op_upgrade` will not test anything, but as soon as the operator gets updated in a future opset the test will automatically become nontrivial.
2. Similarly, add at least one automatic downgrade test for your operator in [onnx/test/automatic_downgrade_test.py](/onnx/test/automatic_downgrade_test.py) using `_test_op_downgrade`. Specify the current opset version when adding the test, so that once the op is updated at a higher opset version, the test will automatically validate the downward conversion.

6. Update the documentation and generate the test data.
1. Run [the script](/tools/update_doc.sh) to update the documentation and generate the test data. If you have files under `onnx/backend/test/data/node` which cannot be generated by the scripts from `onnx/backend/test/case/node`, please further use `python onnx/backend/test/cmd_tools.py generate-data --clean` to clean up the directory and preserve only the needed test data.
Expand Down
110 changes: 110 additions & 0 deletions onnx/test/automatic_conversion_test_base.py
@@ -0,0 +1,110 @@
# Copyright (c) ONNX Project Contributors

# SPDX-License-Identifier: Apache-2.0
import string
import unittest
from typing import Any, Dict, List, Optional, Sequence, Union, cast

import onnx
from onnx import TensorProto, ValueInfoProto, helper, shape_inference, version_converter

LATEST_OPSET = onnx.defs.onnx_opset_version()


class TestAutomaticConversion(unittest.TestCase):
    """Shared harness for ONNX version-converter round-trip tests.

    Subclasses call :meth:`_test_op_conversion` to build a single-node model
    and run it through the version converter, either upgrading from an old
    opset to the latest one or downgrading from the latest opset to an
    older one.
    """

    def _test_op_conversion(
        self,
        op: str,
        from_opset: int,
        input_shapes: Sequence[Union[Sequence[Optional[int]], str]] = ((3, 4, 5),),
        output_shapes: Sequence[Sequence[Optional[int]]] = ((3, 4, 5),),
        input_types: Optional[Sequence[Any]] = None,
        output_types: Optional[Sequence[Any]] = None,
        initializer: Sequence[Any] = (),
        attrs: Optional[Dict[str, Any]] = None,
        seq_inputs: Sequence[int] = (),
        seq_outputs: Sequence[int] = (),
        optional_inputs: Sequence[int] = (),
        optional_outputs: Sequence[int] = (),
        is_upgrade: bool = True,
    ) -> None:
        """Build a one-node model for ``op`` and run the version converter.

        When ``is_upgrade`` is True (default), run the version converter from
        ``from_opset`` to the most recent opset version. When ``is_upgrade``
        is False, run the version converter from the most recent opset
        version down to ``from_opset``. In both cases the checker and strict
        shape inference are run on both the original and converted model.

        An ``""`` entry in ``input_shapes`` stands for an omitted input
        (it gets an empty name). Indices listed in ``seq_inputs`` /
        ``seq_outputs`` mark sequence-typed values; indices in
        ``optional_inputs`` / ``optional_outputs`` mark optional-typed values.
        Types default to FLOAT when ``input_types`` / ``output_types`` are
        not given.
        """
        if attrs is None:
            attrs = {}

        n_inputs = len(input_shapes)
        letters = list(string.ascii_lowercase)[:n_inputs]
        # An empty-string shape marks an omitted input, which by ONNX
        # convention is represented by an empty name.
        input_names = [
            letter if shape != "" else ""
            for (letter, shape) in zip(letters, input_shapes)
        ]
        if input_types is None:
            input_types = [TensorProto.FLOAT] * n_inputs
        is_sequence = [0 if id not in seq_inputs else 1 for id in range(n_inputs)]
        is_optional = [0 if id not in optional_inputs else 1 for id in range(n_inputs)]
        # turn empty strings into [0] to ease type analysis, even though those
        # entries will be ignored
        input_shapes_cast = cast(
            List[List[int]],
            [[0] if isinstance(shape, str) else shape for shape in input_shapes],
        )
        inputs: List[ValueInfoProto] = []
        for name, ttype, shape, is_seq, is_opt in zip(
            input_names, input_types, input_shapes_cast, is_sequence, is_optional
        ):
            if name != "":
                if is_seq:
                    inputs += [
                        helper.make_tensor_sequence_value_info(name, ttype, shape)
                    ]
                elif is_opt:
                    type_proto = helper.make_tensor_type_proto(ttype, shape)
                    optional_type_proto = helper.make_optional_type_proto(type_proto)
                    inputs += [helper.make_value_info(name, optional_type_proto)]
                else:
                    inputs += [helper.make_tensor_value_info(name, ttype, shape)]

        n_outputs = len(output_shapes)
        # Output names continue the alphabet right after the input names.
        output_names = list(string.ascii_lowercase)[n_inputs : n_inputs + n_outputs]
        if output_types is None:
            output_types = [TensorProto.FLOAT] * n_outputs
        is_sequence = [0 if id not in seq_outputs else 1 for id in range(n_outputs)]
        is_optional = [
            0 if id not in optional_outputs else 1 for id in range(n_outputs)
        ]
        output_shapes_cast = cast(
            List[List[int]],
            [[0] if isinstance(shape, str) else shape for shape in output_shapes],
        )
        outputs: List[ValueInfoProto] = []
        for name, ttype, shape, is_seq, is_opt in zip(
            output_names, output_types, output_shapes_cast, is_sequence, is_optional
        ):
            if is_seq:
                outputs += [helper.make_tensor_sequence_value_info(name, ttype, shape)]
            elif is_opt:
                type_proto = helper.make_tensor_type_proto(ttype, shape)
                optional_type_proto = helper.make_optional_type_proto(type_proto)
                outputs += [helper.make_value_info(name, optional_type_proto)]
            else:
                outputs += [helper.make_tensor_value_info(name, ttype, shape)]

        node = helper.make_node(op, input_names, output_names, **attrs)
        graph = helper.make_graph([node], op, inputs, outputs, initializer)
        # Direction of the conversion: upgrade goes from_opset -> latest,
        # downgrade goes latest -> from_opset.
        start_opset = from_opset if is_upgrade else LATEST_OPSET
        end_opset = LATEST_OPSET if is_upgrade else from_opset
        original = helper.make_model(
            graph,
            producer_name="test",
            opset_imports=[helper.make_opsetid("", start_opset)],
        )
        onnx.checker.check_model(original)
        shape_inference.infer_shapes(original, strict_mode=True)

        converted = version_converter.convert_version(original, end_opset)
        onnx.checker.check_model(converted)
        shape_inference.infer_shapes(converted, strict_mode=True)
48 changes: 48 additions & 0 deletions onnx/test/automatic_downgrade_test.py
liqunfu marked this conversation as resolved.
Show resolved Hide resolved
@@ -0,0 +1,48 @@
# Copyright (c) ONNX Project Contributors

# SPDX-License-Identifier: Apache-2.0
import unittest

import numpy as np
from automatic_conversion_test_base import TestAutomaticConversion

from onnx import TensorProto, helper

#####################################################################################
# Every test calls _test_op_downgrade to downgrade a model from the most recent
# opset version to an earlier version and runs checker + shape inference on the
# downgraded model.
####################################################################################


class TestAutomaticDowngrade(TestAutomaticConversion):
    """Automatic downgrade tests for the ONNX version converter."""

    def _test_op_downgrade(self, op, *args, **kwargs):
        """Convert ``op`` from the latest opset down to the given opset."""
        self._test_op_conversion(op, *args, **kwargs, is_upgrade=False)

    def test_ReduceOps(self) -> None:
        # Reduce ops take `axes` as a second input in recent opsets
        # (moved from attribute to input at opset 18, per the adapters in
        # onnx/version_converter/convert.h), so supply it as an INT64
        # initializer for the downgrade to convert back.
        axes = helper.make_tensor(
            "b", TensorProto.INT64, dims=[3], vals=np.array([0, 1, 2])
        )
        reduce_ops = [
            "ReduceL1",
            "ReduceL2",
            "ReduceLogSum",
            "ReduceLogSumExp",
            "ReduceMean",
            "ReduceMax",
            "ReduceMin",
            "ReduceProd",
            "ReduceSum",
            "ReduceSumSquare",
        ]
        for reduce_op in reduce_ops:
            self._test_op_downgrade(
                reduce_op,
                13,
                [[3, 4, 5], [3]],
                [[1, 1, 1]],
                [TensorProto.FLOAT, TensorProto.INT64],
                initializer=[axes],
            )


if __name__ == "__main__":
    unittest.main()
104 changes: 7 additions & 97 deletions onnx/test/automatic_upgrade_test.py
@@ -1,116 +1,26 @@
# Copyright (c) ONNX Project Contributors

# SPDX-License-Identifier: Apache-2.0
import string
import unittest
from typing import Any, Dict, List, Optional, Sequence, Union, cast

import numpy as np
from automatic_conversion_test_base import TestAutomaticConversion

import onnx
from onnx import TensorProto, ValueInfoProto, helper, shape_inference, version_converter
from onnx import TensorProto, helper

#####################################################################################
# Every test creates a model containing a single operator from the lowest possible
# opset version, upgrades it to the most recent opset version and then runs checker +
# shape inference on the upgraded model.
# Every test calls _test_op_conversion to upgrade a model from an initial opset version
# to the most recent version and runs checker and shape inference on the final upgraded model.
####################################################################################

LATEST_OPSET = onnx.defs.onnx_opset_version()
tested_ops = []


class TestAutomaticUpgrade(unittest.TestCase):
def _test_op_upgrade(
self,
op: str,
from_opset: int,
input_shapes: Sequence[Union[Sequence[Optional[int]], str]] = ((3, 4, 5),),
output_shapes: Sequence[Sequence[Optional[int]]] = ((3, 4, 5),),
input_types: Optional[Sequence[Any]] = None,
output_types: Optional[Sequence[Any]] = None,
initializer: Sequence[Any] = (),
attrs: Optional[Dict[str, Any]] = None,
seq_inputs: Sequence[int] = (),
seq_outputs: Sequence[int] = (),
optional_inputs: Sequence[int] = (),
optional_outputs: Sequence[int] = (),
) -> None:
if attrs is None:
attrs = {}

class TestAutomaticUpgrade(TestAutomaticConversion):
def _test_op_upgrade(self, op, *args, **kwargs):
tested_ops.append(op)

n_inputs = len(input_shapes)
letters = list(string.ascii_lowercase)[:n_inputs]
input_names = [
letter if shape != "" else ""
for (letter, shape) in zip(letters, input_shapes)
]
if input_types is None:
input_types = [TensorProto.FLOAT] * n_inputs
is_sequence = [0 if id not in seq_inputs else 1 for id in range(n_inputs)]
is_optional = [0 if id not in optional_inputs else 1 for id in range(n_inputs)]
# turn empty strings into [0] to ease type analysis, even though those entries
# will be ignored
input_shapes_cast = cast(
List[List[int]],
[[0] if isinstance(shape, str) else shape for shape in input_shapes],
)
inputs: List[ValueInfoProto] = []
for name, ttype, shape, is_seq, is_opt in zip(
input_names, input_types, input_shapes_cast, is_sequence, is_optional
):
if name != "":
if is_seq:
inputs += [
helper.make_tensor_sequence_value_info(name, ttype, shape)
]
elif is_opt:
type_proto = helper.make_tensor_type_proto(ttype, shape)
optional_type_proto = helper.make_optional_type_proto(type_proto)
inputs += [helper.make_value_info(name, optional_type_proto)]
else:
inputs += [helper.make_tensor_value_info(name, ttype, shape)]

n_outputs = len(output_shapes)
output_names = list(string.ascii_lowercase)[n_inputs : n_inputs + n_outputs]
if output_types is None:
output_types = [TensorProto.FLOAT] * n_outputs
is_sequence = [0 if id not in seq_outputs else 1 for id in range(n_outputs)]
is_optional = [
0 if id not in optional_outputs else 1 for id in range(n_outputs)
]
output_shapes_cast = cast(
List[List[int]],
[[0] if isinstance(shape, str) else shape for shape in output_shapes],
)
outputs: List[ValueInfoProto] = []
for name, ttype, shape, is_seq, is_opt in zip(
output_names, output_types, output_shapes_cast, is_sequence, is_optional
):
if is_seq:
outputs += [helper.make_tensor_sequence_value_info(name, ttype, shape)]
elif is_opt:
type_proto = helper.make_tensor_type_proto(ttype, shape)
optional_type_proto = helper.make_optional_type_proto(type_proto)
outputs += [helper.make_value_info(name, optional_type_proto)]
else:
outputs += [helper.make_tensor_value_info(name, ttype, shape)]

node = helper.make_node(op, input_names, output_names, **attrs)
graph = helper.make_graph([node], op, inputs, outputs, initializer)
original = helper.make_model(
graph,
producer_name="test",
opset_imports=[helper.make_opsetid("", from_opset)],
)
onnx.checker.check_model(original)
shape_inference.infer_shapes(original, strict_mode=True)

converted = version_converter.convert_version(original, LATEST_OPSET)
onnx.checker.check_model(converted)
shape_inference.infer_shapes(converted, strict_mode=True)
self._test_op_conversion(op, *args, **kwargs, is_upgrade=True)

def test_Abs(self) -> None:
self._test_op_upgrade("Abs", 1, attrs={"consumed_inputs": [0]})
Expand Down
11 changes: 11 additions & 0 deletions onnx/version_converter/convert.h
Expand Up @@ -545,6 +545,17 @@ class DefaultVersionConverter : public BaseVersionConverter {
registerAdapter(std::make_unique<AxesAttributeToInput>("ReduceProd", OpSetID(17), OpSetID(18)));
registerAdapter(std::make_unique<AxesAttributeToInput>("ReduceSumSquare", OpSetID(17), OpSetID(18)));

/******** 18 -> 17 ********/
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceL1", OpSetID(18), OpSetID(17)));
justinchuby marked this conversation as resolved.
Show resolved Hide resolved
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceL2", OpSetID(18), OpSetID(17)));
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceLogSum", OpSetID(18), OpSetID(17)));
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceLogSumExp", OpSetID(18), OpSetID(17)));
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceMax", OpSetID(18), OpSetID(17)));
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceMean", OpSetID(18), OpSetID(17)));
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceMin", OpSetID(18), OpSetID(17)));
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceProd", OpSetID(18), OpSetID(17)));
registerAdapter(std::make_unique<AxesInputToAttribute>("ReduceSumSquare", OpSetID(18), OpSetID(17)));

/******** 18 -> 19 ********/
registerAdapter(std::make_unique<CompatibleAdapter>("Equal", OpSetID(18), OpSetID(19)));
registerAdapter(std::make_unique<CompatibleAdapter>("AveragePool", OpSetID(18), OpSetID(19)));
Expand Down