Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
7dfc4b8
[Lint] rerun linter, fix errors
maltanar Sep 25, 2025
7456919
[Core] add get_opset_imports utility fxn to ModelWrapper
maltanar Sep 25, 2025
89396cd
[Core] return dict from ModelWrapper.get_opset_imports
maltanar Sep 25, 2025
db2994f
[Core] add versioned op to getCustomOp with fallback to old style
maltanar Sep 25, 2025
8a2db22
[Core] introduce ModelWrapper.get_customop_wrapper
maltanar Sep 25, 2025
402a580
[Test] add basic unit tests for versioned custom op fetching
maltanar Sep 25, 2025
ad80561
Merge branch 'main' into feature/op_version
maltanar Oct 2, 2025
407fb13
[Test] extend test_customop_version for default v handler
maltanar Oct 2, 2025
feac9f0
[Core] opset ver. fallback for ModelWrapper.get_customop_wrapper
maltanar Oct 2, 2025
89eea4c
[Core] getCustomOp: default v to None, fetch highest available v.
maltanar Oct 2, 2025
ec517b5
[Test] cover newly added opset ver behavior in test_customop_version
maltanar Oct 2, 2025
7406dcf
Merge branch 'main' into feature/op_version
maltanar Oct 2, 2025
aeeff58
[Core, Util] distinguish preferred onnx opset from qonnx opset
maltanar Oct 2, 2025
5801504
[Core] respect selected opsets during execution
maltanar Oct 2, 2025
35b8b12
[CustomOp] alias all qonnx.custom_op.general as v1
maltanar Oct 2, 2025
d190a69
[ChanLast] alias existing channels_last ops as v1
maltanar Oct 2, 2025
5f58f49
[Test] add opsets for test_custom_onnx_exec
maltanar Oct 2, 2025
db0b15a
[ChanLast] emulate op ver agnostic dict for channels last ops
maltanar Oct 3, 2025
83c53ae
[Core] use isinstance instead of type check for custom_op
maltanar Oct 3, 2025
6bfc2a1
[ChanLast] derive fake custom_op from dict, ensure domain import
maltanar Oct 3, 2025
c9811c5
[QuantAvgPool2d] use preferred ONNX opset for exec_node() impl
maltanar Oct 3, 2025
073985d
[ChanLast] implement __contains__ for op registration
maltanar Oct 3, 2025
0260d98
Merge branch 'main' into feature/op_version
maltanar Oct 16, 2025
d982e5f
[CustomOp] use get_preferred_qonnx_opset as default
maltanar Oct 16, 2025
94cf223
[Registry] bugfix for getCustomOp inst opset version
maltanar Oct 16, 2025
32c0b3c
[Test] extra opset v checks in test_customop_version
maltanar Oct 16, 2025
6b9d277
[Core] introduce set_opset_import
maltanar Oct 23, 2025
787a47e
refactor: implement namespace-based opset versioning system
tafk7 Nov 2, 2025
f8a07af
refactor: remove explicit op_version attributes in favor of class nam…
tafk7 Nov 3, 2025
cb6e83b
[Linter] run pre-commit on all files
maltanar Nov 18, 2025
9e55a5d
[ChanLast] use new registry methods for oplist
maltanar Nov 18, 2025
751ba09
[Test] use new registry API in test_attr
maltanar Nov 18, 2025
7bfc299
[ChanLast] changes for new registry and op ver compatibility
maltanar Nov 18, 2025
4671158
[Test] switch is_finn_op to is_custom_op
maltanar Nov 19, 2025
83d337e
Merge branch 'main' into fix/op_version-main-merge-yaman-changes
maltanar Nov 19, 2025
fd7939f
[MaxpoolNHWC] Remove opset version from execute_node fct
auphelia Nov 5, 2025
1b58cf8
[Test] fix incorrect import in test_channelslast
maltanar Nov 19, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions src/qonnx/core/execute_custom_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,9 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import qonnx.custom_op.registry as registry
from qonnx.util.basic import get_preferred_onnx_opset


def execute_custom_node(node, context, graph, onnx_opset_version=get_preferred_onnx_opset()):
def execute_custom_node(node, context, graph, onnx_opset_version):
"""Call custom implementation to execute a single custom node.
Input/output provided via context."""
op_type = node.op_type
Expand Down
17 changes: 8 additions & 9 deletions src/qonnx/core/onnx_exec.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,15 +36,10 @@
import qonnx.analysis.topology as ta
import qonnx.core.execute_custom_node as ex_cu_node
from qonnx.custom_op.registry import is_custom_op
from qonnx.util.basic import (
get_preferred_onnx_opset,
get_sanitize_quant_tensors,
qonnx_make_model,
sanitize_quant_values,
)
from qonnx.util.basic import get_preferred_qonnx_opset, get_sanitize_quant_tensors, qonnx_make_model, sanitize_quant_values


def execute_node(node, context, graph, return_full_exec_context=False, opset_version=get_preferred_onnx_opset()):
def execute_node(node, context, graph, opset_version, return_full_exec_context=False):
"""Executes a single node by using onnxruntime or with a custom function.

Input/output provided via context."""
Expand Down Expand Up @@ -158,7 +153,7 @@ def execute_onnx(model, input_dict, return_full_exec_context=False, start_node=N
model_exec_mode = model.get_metadata_prop("exec_mode")
if (model_exec_mode is None) or (model_exec_mode == ""):
# extract opset version for node-by-node execution
opset_version = model.model.opset_import[0].version
opset_imports = model.get_opset_imports()
# execute the model node by node
# we can simply walk down the list since the ONNX spec guarantees that it is
# topologically sorted
Expand All @@ -176,7 +171,11 @@ def execute_onnx(model, input_dict, return_full_exec_context=False, start_node=N
if get_sanitize_quant_tensors() != 0:
# round input values to match quantization annotation
execution_context = sanitize_quant_values(model, node.input, execution_context)
execute_node(node, execution_context, graph, return_full_exec_context, opset_version)
if node.domain in opset_imports:
opset_version = opset_imports[node.domain]
else:
opset_version = get_preferred_qonnx_opset()
execute_node(node, execution_context, graph, opset_version, return_full_exec_context)
if get_sanitize_quant_tensors() != 0:
# round output values to quantization annotation
execution_context = sanitize_quant_values(model, node.output, execution_context)
Expand Down
26 changes: 23 additions & 3 deletions src/qonnx/custom_op/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,15 +30,35 @@
import onnx.numpy_helper as np_helper
from abc import ABC, abstractmethod

from qonnx.util.basic import get_by_name, get_preferred_onnx_opset
from qonnx.util.basic import get_by_name, get_preferred_qonnx_opset


class CustomOp(ABC):
"""CustomOp class all custom op nodes are based on. Contains different functions
every custom node should have. Some as abstract methods, these have to be
filled when writing a new custom op node."""
filled when writing a new custom op node.

def __init__(self, onnx_node, onnx_opset_version=get_preferred_onnx_opset()):
Opset Version Support:
CustomOp classes use "since version" semantics matching ONNX operators.
Version is determined by the class name using _vN suffix convention:

- No suffix (e.g., IntQuant): Version 1 (default)
- _vN suffix (e.g., IntQuant_v2): Version N

The registry automatically selects the highest version <= requested opset.

Example:
class IntQuant(CustomOp):
pass # Version 1 (no suffix)

class IntQuant_v2(CustomOp):
pass # Version 2, covers opset v2-v3 (if no v3 exists)

class IntQuant_v4(CustomOp):
pass # Version 4, covers opset v4+
"""

def __init__(self, onnx_node, onnx_opset_version=get_preferred_qonnx_opset()):
super().__init__()
self.onnx_node = onnx_node
self.onnx_opset_version = onnx_opset_version
Expand Down
24 changes: 15 additions & 9 deletions src/qonnx/custom_op/channels_last/__init__.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,17 @@
# Importing registers CustomOps in qonnx.custom_op.channels_last domain
from qonnx.custom_op.channels_last.batch_normalization import BatchNormalization
from qonnx.custom_op.channels_last.conv import Conv
from qonnx.custom_op.channels_last.max_pool import MaxPool
from qonnx.custom_op.channels_last.batch_normalization import (
BatchNormalization_v1,
BatchNormalization_v9,
BatchNormalization_v14,
)
from qonnx.custom_op.channels_last.conv import Conv_v1
from qonnx.custom_op.channels_last.max_pool import MaxPool_v1, MaxPool_v10

# Legacy dictionary for backward compatibility
custom_op = {
"Conv": Conv,
"MaxPool": MaxPool,
"BatchNormalization": BatchNormalization,
}
__all__ = [
"Conv_v1",
"MaxPool_v1",
"MaxPool_v10",
"BatchNormalization_v1",
"BatchNormalization_v9",
"BatchNormalization_v14",
]
12 changes: 11 additions & 1 deletion src/qonnx/custom_op/channels_last/batch_normalization.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
from qonnx.custom_op.channels_last.base_wrapped_op import ChannelsLastWrappedOp


class BatchNormalization(ChannelsLastWrappedOp):
class BatchNormalization_v1(ChannelsLastWrappedOp):
def get_nodeattr_types(self):
"""Returns a dict of permitted attributes for node, where:
ret_dict[attribute_name] = (dtype, require, default_value, <allowed_values>)
Expand Down Expand Up @@ -133,3 +133,13 @@ def verify_node(self):
)

return info_messages


class BatchNormalization_v9(BatchNormalization_v1):
# no relevant changes for channels-last wrapper
pass


class BatchNormalization_v14(BatchNormalization_v9):
# no relevant changes for channels-last wrapper
pass
2 changes: 1 addition & 1 deletion src/qonnx/custom_op/channels_last/conv.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
from qonnx.custom_op.general.im2col import compute_conv_output_dim


class Conv(ChannelsLastWrappedOp):
class Conv_v1(ChannelsLastWrappedOp):
def get_nodeattr_types(self):
"""Returns a dict of permitted attributes for node, where:
ret_dict[attribute_name] = (dtype, require, default_value, <allowed_values>)
Expand Down
7 changes: 6 additions & 1 deletion src/qonnx/custom_op/channels_last/max_pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
from qonnx.custom_op.general.maxpoolnhwc import compute_pool_output_dim


class MaxPool(ChannelsLastWrappedOp):
class MaxPool_v1(ChannelsLastWrappedOp):
def get_nodeattr_types(self):
"""Returns a dict of permitted attributes for node, where:
ret_dict[attribute_name] = (dtype, require, default_value, <allowed_values>)
Expand Down Expand Up @@ -171,3 +171,8 @@ def verify_node(self):
)

return info_messages


class MaxPool_v10(MaxPool_v1):
# no relevant changes for channels-last wrapper
pass
31 changes: 15 additions & 16 deletions src/qonnx/custom_op/general/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,23 +35,22 @@
from qonnx.custom_op.general.intquant import IntQuant
from qonnx.custom_op.general.maxpoolnhwc import MaxPoolNHWC
from qonnx.custom_op.general.multithreshold import MultiThreshold
from qonnx.custom_op.general.quantavgpool2d import QuantAvgPool2d
from qonnx.custom_op.general.quant import Quant
from qonnx.custom_op.general.quantavgpool2d import QuantAvgPool2d
from qonnx.custom_op.general.trunc import Trunc
from qonnx.custom_op.general.xnorpopcount import XnorPopcountMatMul

# Legacy dictionary for backward compatibility
custom_op = {
"DebugMarker": DebugMarker,
"QuantAvgPool2d": QuantAvgPool2d,
"MaxPoolNHWC": MaxPoolNHWC,
"GenericPartition": GenericPartition,
"MultiThreshold": MultiThreshold,
"XnorPopcountMatMul": XnorPopcountMatMul,
"Im2Col": Im2Col,
"IntQuant": IntQuant,
"Quant": IntQuant, # Alias
"Trunc": Trunc,
"BipolarQuant": BipolarQuant,
"FloatQuant": FloatQuant,
}
__all__ = [
"BipolarQuant",
"DebugMarker",
"FloatQuant",
"GenericPartition",
"Im2Col",
"IntQuant",
"MaxPoolNHWC",
"MultiThreshold",
"Quant",
"QuantAvgPool2d",
"Trunc",
"XnorPopcountMatMul",
]
5 changes: 1 addition & 4 deletions src/qonnx/custom_op/general/maxpoolnhwc.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,10 +97,7 @@ def execute_node(self, context, graph):
inp_vi = helper.make_tensor_value_info(inp_name, TensorProto.FLOAT, inp.shape)
out_vi = helper.make_tensor_value_info(out_name, TensorProto.FLOAT, dummy_out.shape)
tmp_graph = helper.make_graph(nodes=[node], name="tmp_graph", inputs=[inp_vi], outputs=[out_vi])
opset_version = self.onnx_opset_version
opset_imports = [helper.make_opsetid("", opset_version)]
onnx_kwargs = {"opset_imports": opset_imports}
tmp_model = qonnx_make_model(tmp_graph, producer_name="finn", **onnx_kwargs)
tmp_model = qonnx_make_model(tmp_graph, producer_name="finn")
tmp_model = ModelWrapper(tmp_model)
new_ctx = {inp_name: inp}
from qonnx.core.onnx_exec import execute_onnx
Expand Down
4 changes: 2 additions & 2 deletions src/qonnx/custom_op/general/quantavgpool2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
from qonnx.core.datatype import DataType
from qonnx.custom_op.base import CustomOp
from qonnx.custom_op.general.maxpoolnhwc import compute_pool_output_dim
from qonnx.util.basic import qonnx_make_model
from qonnx.util.basic import get_preferred_onnx_opset, qonnx_make_model


class QuantAvgPool2d(CustomOp):
Expand Down Expand Up @@ -132,7 +132,7 @@ def execute_node(self, context, graph):
outputs=[outp],
)

opset_version = self.onnx_opset_version
opset_version = get_preferred_onnx_opset()
opset_imports = [helper.make_opsetid("", opset_version)]
onnx_kwargs = {"opset_imports": opset_imports}
model_avgpool = qonnx_make_model(graph_avgpool, **onnx_kwargs)
Expand Down
Loading
Loading