diff --git a/backends/arm/_passes/match_arg_ranks_pass.py b/backends/arm/_passes/match_arg_ranks_pass.py
index 3554fc0954c..2f9209445d9 100644
--- a/backends/arm/_passes/match_arg_ranks_pass.py
+++ b/backends/arm/_passes/match_arg_ranks_pass.py
@@ -51,6 +51,7 @@ def __init__(self, exported_program):
             exir_ops.edge.aten.gt.Tensor,
             exir_ops.edge.aten.ge.Tensor,
             exir_ops.edge.aten.lt.Tensor,
+            exir_ops.edge.aten.le.Tensor,
             exir_ops.edge.aten.pow.Tensor_Tensor,
             exir_ops.edge.aten.where.self,
         ]
diff --git a/backends/arm/_passes/replace_scalar_with_tensor_pass.py b/backends/arm/_passes/replace_scalar_with_tensor_pass.py
index 1e8b2d6b651..185ce941247 100644
--- a/backends/arm/_passes/replace_scalar_with_tensor_pass.py
+++ b/backends/arm/_passes/replace_scalar_with_tensor_pass.py
@@ -32,6 +32,7 @@
     exir_ops.edge.aten.gt.Scalar: exir_ops.edge.aten.gt.Tensor,
     exir_ops.edge.aten.ge.Scalar: exir_ops.edge.aten.ge.Tensor,
     exir_ops.edge.aten.lt.Scalar: exir_ops.edge.aten.lt.Tensor,
+    exir_ops.edge.aten.le.Scalar: exir_ops.edge.aten.le.Tensor,
     exir_ops.edge.aten.ne.Scalar: exir_ops.edge.aten.ne.Tensor,
     torch.ops.aten.add.Scalar: torch.ops.aten.add.Tensor,
     torch.ops.aten.sub.Scalar: torch.ops.aten.sub.Tensor,
@@ -43,6 +44,7 @@
     torch.ops.aten.gt.Scalar: torch.ops.aten.gt.Tensor,
     torch.ops.aten.ge.Scalar: torch.ops.aten.ge.Tensor,
     torch.ops.aten.lt.Scalar: torch.ops.aten.lt.Tensor,
+    torch.ops.aten.le.Scalar: torch.ops.aten.le.Tensor,
     torch.ops.aten.ne.Scalar: torch.ops.aten.ne.Tensor,
 }
diff --git a/backends/arm/operator_support/ethos_u55_support.py b/backends/arm/operator_support/ethos_u55_support.py
index 5a705acd877..22b64505c5b 100644
--- a/backends/arm/operator_support/ethos_u55_support.py
+++ b/backends/arm/operator_support/ethos_u55_support.py
@@ -138,6 +138,7 @@ class EthosU55NotSupported(OperatorSupportBase):
         exir_ops.edge.aten.gt.Tensor,
         exir_ops.edge.aten.gt.Scalar,
         exir_ops.edge.aten.le.Tensor,
+        exir_ops.edge.aten.le.Scalar,
         exir_ops.edge.aten.lt.Tensor,
         exir_ops.edge.aten.lt.Scalar,
         exir_ops.edge.aten.ne.Tensor,
diff --git a/backends/arm/operator_support/tosa_supported_operators.py b/backends/arm/operator_support/tosa_supported_operators.py
index b6671351bc5..9fcff7bdc5f 100644
--- a/backends/arm/operator_support/tosa_supported_operators.py
+++ b/backends/arm/operator_support/tosa_supported_operators.py
@@ -189,6 +189,7 @@ def is_node_supported(
             exir_ops.edge.aten.gt.Tensor,
             exir_ops.edge.aten.gt.Scalar,
             exir_ops.edge.aten.le.Tensor,
+            exir_ops.edge.aten.le.Scalar,
             exir_ops.edge.aten.lt.Tensor,
             exir_ops.edge.aten.lt.Scalar,
             exir_ops.edge.aten.mul.Tensor,
diff --git a/backends/arm/test/ops/test_le.py b/backends/arm/test/ops/test_le.py
index 217e409c6f5..b48bad8248b 100644
--- a/backends/arm/test/ops/test_le.py
+++ b/backends/arm/test/ops/test_le.py
@@ -15,13 +15,15 @@
     TosaPipelineMI,
 )
 
-aten_op = "torch.ops.aten.le.Tensor"
-exir_op = "executorch_exir_dialects_edge__ops_aten_le_Tensor"
 
 input_t = Tuple[torch.Tensor]
 
 
-class GreaterEqual(torch.nn.Module):
+class LessEqual(torch.nn.Module):
+    aten_op_tensor = "torch.ops.aten.le.Tensor"
+    aten_op_scalar = "torch.ops.aten.le.Scalar"
+    exir_op = "executorch_exir_dialects_edge__ops_aten_le_Tensor"
+
     def __init__(self, input, other):
         super().__init__()
         self.input_ = input
@@ -38,72 +40,151 @@ def get_inputs(self):
         return (self.input_, self.other_)
 
 
-op_le_rank1_ones = GreaterEqual(
+op_le_tensor_rank1_ones = LessEqual(
     torch.ones(5),
     torch.ones(5),
 )
-op_le_rank2_rand = GreaterEqual(
+op_le_tensor_rank2_rand = LessEqual(
     torch.rand(4, 5),
     torch.rand(1, 5),
 )
-op_le_rank3_randn = GreaterEqual(
+op_le_tensor_rank3_randn = LessEqual(
     torch.randn(10, 5, 2),
     torch.randn(10, 5, 2),
 )
-op_le_rank4_randn = GreaterEqual(
+op_le_tensor_rank4_randn = LessEqual(
     torch.randn(3, 2, 2, 2),
     torch.randn(3, 2, 2, 2),
 )
 
-test_data_common = {
-    "le_rank1_ones": lambda: op_le_rank1_ones,
-    "le_rank2_rand": lambda: op_le_rank2_rand,
-    "le_rank3_randn": lambda: op_le_rank3_randn,
-    "le_rank4_randn": lambda: op_le_rank4_randn,
+op_le_scalar_rank1_ones = LessEqual(torch.ones(5), 1.0)
+op_le_scalar_rank2_rand = LessEqual(torch.rand(4, 5), 0.2)
+op_le_scalar_rank3_randn = LessEqual(torch.randn(10, 5, 2), -0.1)
+op_le_scalar_rank4_randn = LessEqual(torch.randn(3, 2, 2, 2), 0.3)
+
+test_data_tensor = {
+    "le_tensor_rank1_ones": lambda: op_le_tensor_rank1_ones,
+    "le_tensor_rank2_rand": lambda: op_le_tensor_rank2_rand,
+    "le_tensor_rank3_randn": lambda: op_le_tensor_rank3_randn,
+    "le_tensor_rank4_randn": lambda: op_le_tensor_rank4_randn,
+}
+
+test_data_scalar = {
+    "le_scalar_rank1_ones": lambda: op_le_scalar_rank1_ones,
+    "le_scalar_rank2_rand": lambda: op_le_scalar_rank2_rand,
+    "le_scalar_rank3_randn": lambda: op_le_scalar_rank3_randn,
+    "le_scalar_rank4_randn": lambda: op_le_scalar_rank4_randn,
 }
 
 
-@common.parametrize("test_module", test_data_common)
+@common.parametrize("test_module", test_data_tensor)
 def test_le_tensor_tosa_MI(test_module):
     pipeline = TosaPipelineMI[input_t](
-        test_module(), test_module().get_inputs(), aten_op, exir_op
+        test_module(),
+        test_module().get_inputs(),
+        LessEqual.aten_op_tensor,
+        LessEqual.exir_op,
     )
     pipeline.run()
 
 
-@common.parametrize("test_module", test_data_common)
+@common.parametrize("test_module", test_data_scalar)
+def test_le_scalar_tosa_MI(test_module):
+    pipeline = TosaPipelineMI[input_t](
+        test_module(),
+        test_module().get_inputs(),
+        LessEqual.aten_op_scalar,
+        LessEqual.exir_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_module", test_data_tensor)
 def test_le_tensor_tosa_BI(test_module):
     pipeline = TosaPipelineBI[input_t](
-        test_module(), test_module().get_inputs(), aten_op, exir_op
+        test_module(),
+        test_module().get_inputs(),
+        LessEqual.aten_op_tensor,
+        LessEqual.exir_op,
     )
     pipeline.run()
 
 
-@common.parametrize("test_module", test_data_common)
+@common.parametrize("test_module", test_data_scalar)
+def test_le_scalar_tosa_BI(test_module):
+    pipeline = TosaPipelineBI[input_t](
+        test_module(),
+        test_module().get_inputs(),
+        LessEqual.aten_op_tensor,
+        LessEqual.exir_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_module", test_data_tensor)
+@common.XfailIfNoCorstone300
 def test_le_tensor_u55_BI_not_delegated(test_module):
     # GREATER_EQUAL is not supported on U55. LE uses the GREATER_EQUAL Tosa operator.
     pipeline = OpNotSupportedPipeline[input_t](
         test_module(),
         test_module().get_inputs(),
-        {exir_op: 1},
+        {LessEqual.exir_op: 1},
         quantize=True,
         u55_subset=True,
     )
     pipeline.run()
 
 
+@common.parametrize("test_module", test_data_scalar)
+@common.XfailIfNoCorstone300
+def test_le_scalar_u55_BI_not_delegated(test_module):
+    # GREATER_EQUAL is not supported on U55. LE uses the GREATER_EQUAL Tosa operator.
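+    # The scalar operand is rewritten into a tensor before partitioning (via the
+    # le.Scalar -> le.Tensor mapping added in replace_scalar_with_tensor_pass.py),
+    # so only the le op itself is expected to be rejected; the op materialising
+    # the scalar as a tensor can still be delegated, which presumably is why
+    # n_expected_delegates=1 below.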
+    pipeline = OpNotSupportedPipeline[input_t](
+        test_module(),
+        test_module().get_inputs(),
+        {LessEqual.exir_op: 1},
+        n_expected_delegates=1,
+        quantize=True,
+        u55_subset=True,
+    )
+    pipeline.dump_operator_distribution("export")
+    pipeline.run()
+
+
 @common.parametrize(
     "test_module",
-    test_data_common,
-    xfails={"le_rank4_randn": "4D fails because boolean Tensors can't be subtracted"},
+    test_data_tensor,
+    xfails={
+        "le_tensor_rank4_randn": "4D fails because boolean Tensors can't be subtracted"
+    },
 )
 @common.XfailIfNoCorstone320
 def test_le_tensor_u85_BI(test_module):
     pipeline = EthosU85PipelineBI[input_t](
         test_module(),
         test_module().get_inputs(),
-        aten_op,
-        exir_op,
+        LessEqual.aten_op_tensor,
+        LessEqual.exir_op,
         run_on_fvp=True,
         use_to_edge_transform_and_lower=True,
     )
     pipeline.run()
+
+
+@common.parametrize(
+    "test_module",
+    test_data_scalar,
+    xfails={
+        "le_scalar_rank4_randn": "4D fails because boolean Tensors can't be subtracted"
+    },
+)
+@common.XfailIfNoCorstone320
+def test_le_scalar_u85_BI(test_module):
+    pipeline = EthosU85PipelineBI[input_t](
+        test_module(),
+        test_module().get_inputs(),
+        LessEqual.aten_op_tensor,
+        LessEqual.exir_op,
+        run_on_fvp=True,
+        use_to_edge_transform_and_lower=True,
+    )
+    pipeline.run()