diff --git a/backends/nxp/backend/edge_program_converter.py b/backends/nxp/backend/edge_program_converter.py
index 03d55548d2d..fcfb9787715 100644
--- a/backends/nxp/backend/edge_program_converter.py
+++ b/backends/nxp/backend/edge_program_converter.py
@@ -34,6 +34,7 @@
     exir_ops.edge.aten.avg_pool2d.default: AvgPool2dConverter,  # noqa F405
     exir_ops.edge.aten.cat.default: CatConverter,  # noqa F405
     exir_ops.edge.aten.clone.default: CloneConverter,  # noqa F405
+    exir_ops.edge.dim_order_ops._clone_dim_order.default: CloneConverter,  # noqa F405
     exir_ops.edge.aten.constant_pad_nd.default: ConstantPadNDConverter,  # noqa F405
     exir_ops.edge.aten.convolution.default: ConvolutionConverter,  # noqa F405
     exir_ops.edge.aten.hardtanh.default: HardTanhConverter,  # noqa F405
diff --git a/backends/nxp/backend/ir/converter/node_converters/ops_converters/clone_converter.py b/backends/nxp/backend/ir/converter/node_converters/ops_converters/clone_converter.py
index 1d370ab8c48..17b2cee9874 100644
--- a/backends/nxp/backend/ir/converter/node_converters/ops_converters/clone_converter.py
+++ b/backends/nxp/backend/ir/converter/node_converters/ops_converters/clone_converter.py
@@ -20,6 +20,11 @@ def _has_supported_memory_format(node: Node) -> bool:
 
 
 class CloneConverter(NodeConverter):
+    """
+    This converter is responsible for converting both edge operators:
+        - aten.clone.default
+        - dim_order_ops._clone_dim_order.default
+    """
 
     @staticmethod
     def _is_supported_in_IR(
diff --git a/backends/nxp/neutron_partitioner.py b/backends/nxp/neutron_partitioner.py
index e7ad7ff7a0b..965ad41309b 100644
--- a/backends/nxp/neutron_partitioner.py
+++ b/backends/nxp/neutron_partitioner.py
@@ -201,6 +201,7 @@ def tag_qdq_clusters(self, nodes: list[torch.fx.Node]):
     exir_ops.edge.aten.avg_pool2d.default: AvgPool2dConverter,  # noqa F405
     exir_ops.edge.aten.cat.default: CatConverter,  # noqa F405
     exir_ops.edge.aten.clone.default: CloneConverter,  # noqa F405
+    exir_ops.edge.dim_order_ops._clone_dim_order.default: CloneConverter,  # noqa F405
     exir_ops.edge.aten.constant_pad_nd.default: ConstantPadNDConverter,  # noqa F405
     exir_ops.edge.aten.convolution.default: ConvolutionConverter,  # noqa F405
     exir_ops.edge.aten.hardtanh.default: HardTanhConverter,  # noqa F405
diff --git a/backends/nxp/tests/executors.py b/backends/nxp/tests/executors.py
index 9626a2779c4..632e3da055f 100644
--- a/backends/nxp/tests/executors.py
+++ b/backends/nxp/tests/executors.py
@@ -368,7 +368,13 @@ def convert_run_compare(
 
 
 def graph_contains_any_of_ops(graph: Graph, ops: list) -> bool:
-    return any(node.target in ops for node in graph.nodes)
+    return graph_contains_any(
+        graph, condition=lambda n: hasattr(n, "target") and n.target in ops
+    )
+
+
+def graph_contains_any(graph: Graph, condition: Callable[[Node], bool]) -> bool:
+    return any(map(condition, graph.nodes))
 
 
 target_support_check_function = Callable[[Node, NeutronTargetSpec], bool]
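
Note on the executors.py hunk above: it generalizes the op-list check into a
predicate-based helper, with graph_contains_any_of_ops becoming a thin wrapper
over graph_contains_any. A minimal usage sketch, assuming torch and the
patched executors module are importable (the toy AddRelu module is
illustrative, not part of the patch):

    import torch
    from torch.fx import symbolic_trace

    from executorch.backends.nxp.tests.executors import (
        graph_contains_any,
        graph_contains_any_of_ops,
    )

    class AddRelu(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x + 1)

    # Trace the toy module into a torch.fx graph.
    graph = symbolic_trace(AddRelu()).graph

    # Op-list form: matches on node.target membership.
    assert graph_contains_any_of_ops(graph, ops=[torch.relu])

    # Predicate form: can match on any node property, not just target.
    assert graph_contains_any(graph, condition=lambda n: n.op == "call_function")
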
diff --git a/backends/nxp/tests/ir/converter/node_converter/test_clone_converter.py b/backends/nxp/tests/ir/converter/node_converter/test_clone_converter.py
index f5945607f1b..c02d184c5ae 100644
--- a/backends/nxp/tests/ir/converter/node_converter/test_clone_converter.py
+++ b/backends/nxp/tests/ir/converter/node_converter/test_clone_converter.py
@@ -4,31 +4,33 @@
 # LICENSE file in the root directory of this source tree.
 
+import itertools
+import unittest
+
+import kgb
 import numpy as np
-import pytest
 import torch
 
 from executorch.backends.nxp.backend.edge_program_converter import (
     EdgeProgramToIRConverter,
 )
-from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program
+from executorch.backends.nxp.tests.executorch_pipeline import (
+    to_edge_program,
+    to_quantized_edge_program,
+)
 from executorch.backends.nxp.tests.executors import (
     convert_run_compare,
+    graph_contains_any,
     graph_contains_any_of_ops,
-    ToNCHWPreprocess,
-    ToNHWCPreprocess,
+    ToChannelFirstPreprocess,
+    ToChannelLastPreprocess,
 )
 from executorch.exir.dialects._ops import ops as exir_ops
+from parameterized import parameterized
 from torch import nn
 from torch.export import ExportedProgram
 
 
-@pytest.fixture(autouse=True)
-def reseed_model_per_test_run():
-    torch.manual_seed(23)
-    np.random.seed(23)
-
-
 class SingleConvBlockWithDropout(torch.nn.Module):
     def __init__(
         self, conv_in_channels: int = 3, perform_inplace_dropout: bool = False
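
Note on the import hunk above: the tests move from pytest fixtures and stacked
@pytest.mark.parametrize decorators to unittest plus the parameterized
package, with itertools.product supplying the same cross product of cases. A
minimal sketch of that pattern (ExampleTest and its arguments are
illustrative, not part of the patch):

    import itertools
    import unittest

    from parameterized import parameterized

    class ExampleTest(unittest.TestCase):
        # Expands into one generated test method per (flag, shape) pair,
        # mirroring two stacked @pytest.mark.parametrize decorators.
        @parameterized.expand(
            list(itertools.product([True, False], [(1, 3, 8, 8), (1, 3, 16, 16)]))
        )
        def test_pairs(self, flag, shape):
            self.assertIsInstance(flag, bool)
            self.assertEqual(len(shape), 4)
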
@@ -74,57 +76,108 @@ def forward(self, x):
         return self.block(x)
 
 
-@pytest.mark.parametrize("inplace_dropout", [False, True])
-@pytest.mark.parametrize("input_shape", [(1, 3, 128, 128), (1, 3, 256, 256)])
-def test_conv_dropout_quant(mocker, inplace_dropout: bool, input_shape: tuple[int]):
-    model = SingleConvBlockWithDropout(
-        conv_in_channels=input_shape[1], perform_inplace_dropout=inplace_dropout
-    ).eval()
+class TestCloneConverter(unittest.TestCase):
+    __test__ = False  # Prevent interfering with PyTest tests
 
-    converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
+    @classmethod
+    def setUpClass(cls):
+        torch.manual_seed(23)
+        np.random.seed(23)
 
-    quantized_program = to_quantized_edge_program(model, input_shape).exported_program()
+    @staticmethod
+    def _node_is_clone(node) -> bool:
+        clone_ops = [
+            exir_ops.edge.aten.clone.default,
+            exir_ops.edge.dim_order_ops._clone_dim_order.default,
+        ]
 
-    tflite_flatbuffers_model, io_formats = converter_spy.spy_return
-    exported_program: ExportedProgram = converter_spy.call_args.args[1]
-
-    assert not graph_contains_any_of_ops(
-        graph=quantized_program.graph, ops=[exir_ops.edge.aten.clone.default]
-    )
-
-    input_data = (np.random.random(input_shape) * 50).astype(np.int8)
-    convert_run_compare(
-        exported_program,
-        tfl_model=tflite_flatbuffers_model,
-        tflite_input_preprocess=ToNHWCPreprocess(),
-        tflite_output_preprocess=ToNCHWPreprocess(),
-        input_data=input_data,
-        atol=1.0,
-    )
+        def target_can_be_clone(node):
+            if hasattr(node, "op") and node.op == "call_function":
+                return "clone" in node.target.__name__
+            return False
 
-
-@pytest.mark.parametrize("inplace_dropout", [False, True])
-def test_clone_pool_view_copy_quant(
-    mocker, inplace_dropout: bool, input_shape: tuple[int] = (1, 64, 25, 5)
-):
-    model = KWSFinalBlock(input_shape).eval()
+        return getattr(node, "target", None) in clone_ops or target_can_be_clone(node)
 
-    converter_spy = mocker.spy(EdgeProgramToIRConverter, "convert_program")
-
-    quantized_program = to_quantized_edge_program(model, input_shape).exported_program()
-
-    tflite_flatbuffers_model, io_formats = converter_spy.spy_return
-    exported_program: ExportedProgram = converter_spy.call_args.args[1]
-
-    assert not graph_contains_any_of_ops(
-        graph=quantized_program.graph, ops=[exir_ops.edge.aten.clone.default]
+    @parameterized.expand(
+        list(itertools.product([True, False], [(1, 3, 128, 128), (1, 3, 256, 256)]))
     )
-
-    input_data = (np.random.random(input_shape) * 50).astype(np.int8)
-    convert_run_compare(
-        exported_program,
-        tfl_model=tflite_flatbuffers_model,
-        tflite_input_preprocess=ToNHWCPreprocess(),
-        input_data=input_data,
-        atol=1.0,
+    def test_conv_dropout_quant(self, inplace_dropout: bool, input_shape: tuple[int]):
+        model = SingleConvBlockWithDropout(
+            conv_in_channels=input_shape[1], perform_inplace_dropout=inplace_dropout
+        ).eval()
+
+        with kgb.spy_on(
+            EdgeProgramToIRConverter.convert_program, call_original=True
+        ) as converter_spy:
+            quantized_program = to_quantized_edge_program(
+                model, input_shape
+            ).exported_program()
+
+            tflite_flatbuffers_model, _ = converter_spy.calls[-1].return_value
+            exported_program: ExportedProgram = converter_spy.calls[-1].args[0]
+
+            assert not graph_contains_any(
+                graph=quantized_program.graph,
+                condition=TestCloneConverter._node_is_clone,
+            )
+
+            input_data = (np.random.random(input_shape) * 50).astype(np.int8)
+            convert_run_compare(
+                exported_program,
+                tfl_model=tflite_flatbuffers_model,
+                tflite_input_preprocess=ToChannelLastPreprocess(),
+                tflite_output_preprocess=ToChannelFirstPreprocess(),
+                input_data=input_data,
+                atol=1.0,
+            )
+
+    @parameterized.expand(
+        list(itertools.product([True, False], [(1, 3, 128, 128), (1, 3, 256, 256)]))
     )
+    def test_conv_dropout_no_quant(
+        self, inplace_dropout: bool, input_shape: tuple[int]
+    ):
+        model = SingleConvBlockWithDropout(
+            conv_in_channels=input_shape[1], perform_inplace_dropout=inplace_dropout
+        ).eval()
+
+        edge_program = to_edge_program(model, input_shape).exported_program()
+
+        has_clone = graph_contains_any_of_ops(
+            graph=edge_program.graph,
+            ops=[
+                exir_ops.edge.aten.clone.default,
+                exir_ops.edge.dim_order_ops._clone_dim_order.default,
+            ],
+        )
+
+        # Dropout with inplace=True should not produce a clone edge op, and vice versa
+        assert inplace_dropout ^ has_clone
+
+    def test_clone_pool_view_copy_quant(self, input_shape: tuple[int] = (1, 64, 25, 5)):
+        model = KWSFinalBlock(input_shape).eval()
+
+        with kgb.spy_on(
+            EdgeProgramToIRConverter.convert_program, call_original=True
+        ) as converter_spy:
+            quantized_program = to_quantized_edge_program(
+                model, input_shape
+            ).exported_program()
+
+            tflite_flatbuffers_model, _ = converter_spy.calls[-1].return_value
+            exported_program: ExportedProgram = converter_spy.calls[-1].args[0]
+
+            assert not graph_contains_any(
+                graph=quantized_program.graph,
+                condition=TestCloneConverter._node_is_clone,
+            )
+
+            input_data = (np.random.random(input_shape) * 50).astype(np.int8)
+            convert_run_compare(
+                exported_program,
+                tfl_model=tflite_flatbuffers_model,
+                tflite_input_preprocess=ToChannelLastPreprocess(),
+                input_data=input_data,
+                atol=1.0,
+            )
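
Note on the spy usage in the tests above: pytest-mock's mocker.spy is replaced
by kgb.spy_on, which wraps a callable in place, records every call, and still
invokes the original when call_original=True is passed. A minimal sketch of
the pattern, using only the kgb API exercised by the patch (the standalone
convert function is hypothetical):

    import kgb

    def convert(x):
        return x * 2

    # Within the context, calls to convert are recorded on the spy.
    with kgb.spy_on(convert, call_original=True) as spy:
        result = convert(21)

    assert result == 42                      # the original still ran
    assert spy.calls[-1].args == (21,)       # arguments were recorded
    assert spy.calls[-1].return_value == 42  # and so was the return value
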