From bcbf24a861226e3f2863ecaeadcc0999e8de9643 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=C3=A5ns=20Nilsson?=
Date: Mon, 10 Mar 2025 13:54:24 +0100
Subject: [PATCH] Arm backend: Add TOSA support for logical not
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Change-Id: I5bc8e68561e1403a2de388a3a0fd1f4435401ea9
Signed-off-by: Måns Nilsson
---
 .../operator_support/tosa_supported_operators.py |  2 ++
 backends/arm/operators/ops_unary.py              | 14 +++++++-------
 backends/arm/test/models/test_conformer.py       |  1 -
 backends/arm/test/ops/test_logical.py            | 16 ++++++++++++++++
 4 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/backends/arm/operator_support/tosa_supported_operators.py b/backends/arm/operator_support/tosa_supported_operators.py
index e67e9ce7bc5..48bf07bc3a2 100644
--- a/backends/arm/operator_support/tosa_supported_operators.py
+++ b/backends/arm/operator_support/tosa_supported_operators.py
@@ -115,6 +115,7 @@ def is_node_supported(
             exir_ops.edge.aten.logical_and.default,
             exir_ops.edge.aten.logical_or.default,
             exir_ops.edge.aten.logical_xor.default,
+            exir_ops.edge.aten.logical_not.default,
             exir_ops.edge.aten.bitwise_and.Tensor,
             exir_ops.edge.aten.bitwise_or.Tensor,
             exir_ops.edge.aten.bitwise_xor.Tensor,
@@ -199,6 +200,7 @@ def is_node_supported(
             exir_ops.edge.aten.logical_and.default,
             exir_ops.edge.aten.logical_or.default,
             exir_ops.edge.aten.logical_xor.default,
+            exir_ops.edge.aten.logical_not.default,
             exir_ops.edge.aten.amax.default,
             exir_ops.edge.aten.amin.default,
             exir_ops.edge.aten.eq.Tensor,
diff --git a/backends/arm/operators/ops_unary.py b/backends/arm/operators/ops_unary.py
index 31397b9a3b1..464bbe83925 100644
--- a/backends/arm/operators/ops_unary.py
+++ b/backends/arm/operators/ops_unary.py
@@ -14,17 +14,17 @@
 )
 
 from executorch.backends.arm.tosa_mapping import TosaArg
-from executorch.backends.arm.tosa_specification import TosaSpecification
 from serializer.tosa_serializer import TosaOp
 
 
 def unary_operator_factory(unary_target: str, tosa_op):
     "Creates and registers NodeVisitors for operations that have one input and map directly into a TOSA op."
 
-    class UnaryOperator_080_MI(NodeVisitor):
-        target = unary_target
+    # Some TOSA unary operators only support float
+    fp_only_ops = ["aten.floor.default"]
 
-        tosa_specs = [TosaSpecification.create_from_string("TOSA-0.80+MI")]
+    class UnaryOperator(NodeVisitor):
+        target = unary_target
 
         def __init__(self, *args):
             super().__init__(*args)
@@ -43,15 +43,15 @@ def define_node(
                     f"Got {inputs[0].dtype=}, {output.dtype=}"
                 )
 
-            if not (inputs[0].dtype == ts.DType.FP32):
+            if self.target in fp_only_ops and not (inputs[0].dtype == ts.DType.FP32):
                 raise ValueError(
                     "All inputs need to be FP32." f"Got {inputs[0].dtype=}"
                 )
 
-            # MI lowering
             tosa_graph.addOperator(tosa_op, [inputs[0].name], [output.name])
 
-    register_node_visitor(UnaryOperator_080_MI)
+    register_node_visitor(UnaryOperator)
 
 
 unary_operator_factory("aten.floor.default", TosaOp.Op().FLOOR)
+unary_operator_factory("aten.logical_not.default", TosaOp.Op().LOGICAL_NOT)
diff --git a/backends/arm/test/models/test_conformer.py b/backends/arm/test/models/test_conformer.py
index d9bc4e363c1..0976b981f62 100644
--- a/backends/arm/test/models/test_conformer.py
+++ b/backends/arm/test/models/test_conformer.py
@@ -34,7 +34,6 @@ class TestConformer(unittest.TestCase):
         "executorch_exir_dialects_edge__ops_aten_max_default": 1,
         "executorch_exir_dialects_edge__ops_aten_eq_Scalar": 2,
         "executorch_exir_dialects_edge__ops_aten_where_self": 4,
-        "executorch_exir_dialects_edge__ops_aten_logical_not_default": 4,
         "executorch_exir_dialects_edge__ops_aten_any_dim": 2,
         "torch.ops.aten._assert_scalar.default": 10,
         "torch.ops.aten._local_scalar_dense.default": 1,
diff --git a/backends/arm/test/ops/test_logical.py b/backends/arm/test/ops/test_logical.py
index 58a3f535048..fd59bbbd263 100644
--- a/backends/arm/test/ops/test_logical.py
+++ b/backends/arm/test/ops/test_logical.py
@@ -40,6 +40,14 @@ def forward(self, tensor1: torch.Tensor, tensor2: torch.Tensor):
         return tensor1.logical_or(tensor2)
 
 
+class Not(torch.nn.Module):
+    aten_op = "torch.ops.aten.logical_not.default"
+    exir_op = "executorch_exir_dialects_edge__ops_aten_logical_not_default"
+
+    def forward(self, tensor: torch.Tensor):
+        return torch.logical_not(tensor)
+
+
 input_t2 = Tuple[torch.Tensor, torch.Tensor]  # Input x, y
 
 
@@ -64,6 +72,10 @@ def forward(self, tensor1: torch.Tensor, tensor2: torch.Tensor):
 
 
 test_data = {
+    "not_rank1": (Not(), test_input["rank1"][:-1]),
+    "not_rand_rank2": (Not(), test_input["rand_rank2"][:-1]),
+    "not_rand_rank3": (Not(), test_input["rand_rank3"][:-1]),
+    "not_rand_rank4": (Not(), test_input["rand_rank4"][:-1]),
     "and_rank1": (And(), test_input["rank1"]),
     "and_rand_rank2": (And(), test_input["rand_rank2"]),
     "and_rand_rank3": (And(), test_input["rand_rank3"]),
@@ -80,6 +92,10 @@ def forward(self, tensor1: torch.Tensor, tensor2: torch.Tensor):
 
 
 fvp_xfails = {
+    "not_rank1": "MLETORCH-706 Support ScalarType::Bool in EthosUBackend.",
+    "not_rand_rank2": "MLETORCH-706: Support ScalarType::Bool in EthosUBackend.",
+    "not_rand_rank3": "MLETORCH-706: Support ScalarType::Bool in EthosUBackend.",
+    "not_rand_rank4": "MLETORCH-706: Support ScalarType::Bool in EthosUBackend.",
     "and_rank1": "MLETORCH-706 Support ScalarType::Bool in EthosUBackend.",
     "and_rand_rank2": "MLETORCH-706: Support ScalarType::Bool in EthosUBackend.",
     "and_rand_rank3": "MLETORCH-706: Support ScalarType::Bool in EthosUBackend.",