diff --git a/backends/arm/test/models/test_conformer.py b/backends/arm/test/models/test_conformer.py index 5b9a50f08e8..2448ea47b40 100644 --- a/backends/arm/test/models/test_conformer.py +++ b/backends/arm/test/models/test_conformer.py @@ -3,32 +3,37 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest +from typing import Tuple + +import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) from torchaudio.models import Conformer +input_t = Tuple[torch.Tensor, torch.IntTensor] # Input x, y + def get_test_inputs(dim, lengths, num_examples): return (torch.rand(num_examples, int(lengths.max()), dim), lengths) -class TestConformer(unittest.TestCase): +class TestConformer: """Tests Torchaudio Conformer""" # Adjust nbr below as we increase op support. Note: most of the delegates # calls are directly consecutive to each other in the .pte. The reason # for that is some assert ops are removed by passes in the # .to_executorch step, i.e. after Arm partitioner. - ops_after_partitioner = { - "executorch_exir_dialects_edge__ops_aten_max_default": 1, - "torch.ops.aten._assert_scalar.default": 7, - "torch.ops.aten._local_scalar_dense.default": 1, - } + aten_ops = ["torch.ops.aten._assert_scalar.default"] dim = 16 num_examples = 10 @@ -43,96 +48,87 @@ class TestConformer(unittest.TestCase): ) conformer = conformer.eval() - def test_conformer_tosa_MI(self): - ( - ArmTester( - self.conformer, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec(tosa_spec="TOSA-0.80+MI"), - ) - .export() - .to_edge_transform_and_lower() - .dump_operator_distribution() - .check_count(self.ops_after_partitioner) - .to_executorch() - # TODO(MLETORCH-632): Fix numerical errors - .run_method_and_compare_outputs( - rtol=1.0, - atol=5.0, - inputs=get_test_inputs(self.dim, self.lengths, self.num_examples), - ) - ) - - def test_conformer_tosa_BI(self): - ( - ArmTester( - self.conformer, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec(tosa_spec="TOSA-0.80+BI"), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .to_executorch() - .run_method_and_compare_outputs( - qtol=1.0, - rtol=1.0, - atol=5.0, - inputs=get_test_inputs(self.dim, self.lengths, self.num_examples), - ) - ) - - def test_conformer_u55_BI(self): - tester = ( - ArmTester( - self.conformer, - example_inputs=self.model_example_inputs, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .to_executorch() - .serialize() - ) - - if conftest.is_option_enabled("corstone_fvp"): - try: - tester.run_method_and_compare_outputs( - qtol=1.0, - rtol=1.0, - atol=5.0, - inputs=get_test_inputs(self.dim, self.lengths, self.num_examples), - ) - self.fail( - "TODO(MLETORCH-635): Expected failure under FVP option, but test passed." 
- ) - except Exception: - pass - - def test_conformer_u85_BI(self): - tester = ( - ArmTester( - self.conformer, - example_inputs=self.model_example_inputs, - compile_spec=common.get_u85_compile_spec(), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - try: - tester.run_method_and_compare_outputs( - qtol=1.0, - rtol=1.0, - atol=5.0, - inputs=get_test_inputs(self.dim, self.lengths, self.num_examples), - ) - self.fail( - "TODO(MLETORCH-635): Expected failure under FVP option, but test passed." - ) - except Exception: - pass + +def test_conformer_tosa_MI(): + pipeline = TosaPipelineMI[input_t]( + TestConformer.conformer, + TestConformer.model_example_inputs, + aten_op=TestConformer.aten_ops, + exir_op=[], + use_to_edge_transform_and_lower=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", + get_test_inputs( + TestConformer.dim, TestConformer.lengths, TestConformer.num_examples + ), + rtol=1.0, + atol=5.0, + ) + pipeline.run() + + +def test_conformer_tosa_BI(): + pipeline = TosaPipelineBI[input_t]( + TestConformer.conformer, + TestConformer.model_example_inputs, + aten_op=TestConformer.aten_ops, + exir_op=[], + use_to_edge_transform_and_lower=True, + ) + pipeline.pop_stage("check_count.exir") + pipeline.change_args( + "run_method_and_compare_outputs", + get_test_inputs( + TestConformer.dim, TestConformer.lengths, TestConformer.num_examples + ), + rtol=1.0, + atol=5.0, + ) + pipeline.run() + + +@common.XfailIfNoCorstone300 +@pytest.mark.xfail( + reason="TODO(MLETORCH-635): Expected failure under FVP option, but test passed." +) +def test_conformer_u55_BI(): + pipeline = EthosU55PipelineBI[input_t]( + TestConformer.conformer, + TestConformer.model_example_inputs, + aten_ops=TestConformer.aten_ops, + exir_ops=[], + use_to_edge_transform_and_lower=True, + run_on_fvp=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", + get_test_inputs( + TestConformer.dim, TestConformer.lengths, TestConformer.num_examples + ), + rtol=1.0, + atol=5.0, + ) + pipeline.run() + + +@common.XfailIfNoCorstone320 +@pytest.mark.xfail(reason="All IO needs to have the same data type (MLETORCH-635)") +def test_conformer_u85_BI(): + pipeline = EthosU85PipelineBI[input_t]( + TestConformer.conformer, + TestConformer.model_example_inputs, + aten_ops=TestConformer.aten_ops, + exir_ops=[], + use_to_edge_transform_and_lower=True, + run_on_fvp=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", + get_test_inputs( + TestConformer.dim, TestConformer.lengths, TestConformer.num_examples + ), + rtol=1.0, + atol=5.0, + ) + pipeline.run() diff --git a/backends/arm/test/models/test_dl3_arm.py b/backends/arm/test/models/test_dl3_arm.py index 93fb1483017..2e7a3117865 100644 --- a/backends/arm/test/models/test_dl3_arm.py +++ b/backends/arm/test/models/test_dl3_arm.py @@ -3,92 +3,87 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest +from typing import Tuple import pytest -from executorch.backends.arm.test import common, conftest +import torch + +from executorch.backends.arm.test import common + +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) -from executorch.backends.arm.test.tester.arm_tester import ArmTester from executorch.examples.models import deeplab_v3 +input_t = Tuple[torch.Tensor] # Input x + -class TestDl3(unittest.TestCase): +class TestDl3: """Tests DeepLabv3.""" dl3 = deeplab_v3.DeepLabV3ResNet50Model() model_example_inputs = dl3.get_example_inputs() dl3 = dl3.get_eager_model() - @unittest.expectedFailure - def test_dl3_tosa_MI(self): - ( - ArmTester( - self.dl3, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .to_edge_transform_and_lower() - .to_executorch() - .run_method_and_compare_outputs(inputs=self.dl3.get_example_inputs()) - ) - - @unittest.expectedFailure - def test_dl3_tosa_BI(self): - ( - ArmTester( - self.dl3, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .to_executorch() - .run_method_and_compare_outputs( - atol=1.0, qtol=1, inputs=self.dl3.get_example_inputs() - ) - ) - - @pytest.mark.slow - @pytest.mark.corstone_fvp - @unittest.skip - def test_dl3_u55_BI(self): - tester = ( - ArmTester( - self.dl3, - example_inputs=self.model_example_inputs, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs( - atol=1.0, qtol=1, inputs=self.dl3.get_example_inputs() - ) - - @pytest.mark.slow - @pytest.mark.corstone_fvp - @unittest.skip - def test_dl3_u85_BI(self): - tester = ( - ArmTester( - self.dl3, - example_inputs=self.model_example_inputs, - compile_spec=common.get_u85_compile_spec(), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs( - atol=1.0, qtol=1, inputs=self.dl3.get_example_inputs() - ) + +def test_dl3_tosa_MI(): + pipeline = TosaPipelineMI[input_t]( + TestDl3.dl3, + TestDl3.model_example_inputs, + aten_op=[], + exir_op=[], + ) + pipeline.change_args( + "run_method_and_compare_outputs", rtol=1.0, atol=1.0 + ) # TODO: MLETORCH-1036 decrease tolerance + pipeline.run() + + +def test_dl3_tosa_BI(): + pipeline = TosaPipelineBI[input_t]( + TestDl3.dl3, + TestDl3.model_example_inputs, + aten_op=[], + exir_op=[], + ) + pipeline.change_args( + "run_method_and_compare_outputs", rtol=1.0, atol=1.0 + ) # TODO: MLETORCH-1036 decrease tolerance + pipeline.run() + + +@common.XfailIfNoCorstone300 +@pytest.mark.skip(reason="upsample_bilinear2d operator is not supported on U55") +def test_dl3_u55_BI(): + pipeline = EthosU55PipelineBI[input_t]( + TestDl3.dl3, + TestDl3.model_example_inputs, + aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", rtol=1.0, atol=1.0 + ) # TODO: MLETORCH-1036 decrease tolerance + pipeline.run() + + +@common.XfailIfNoCorstone320 +@pytest.mark.skip(reason="Runs out of memory on U85") +def test_dl3_u85_BI(): + pipeline = EthosU85PipelineBI[input_t]( + TestDl3.dl3, + TestDl3.model_example_inputs, + 
aten_ops=[], + exir_ops=[], + run_on_fvp=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", rtol=1.0, atol=1.0 + ) # TODO: MLETORCH-1036 decrease tolerance + pipeline.run() diff --git a/backends/arm/test/models/test_llama.py b/backends/arm/test/models/test_llama.py index f5d879b3b8b..494bef48444 100644 --- a/backends/arm/test/models/test_llama.py +++ b/backends/arm/test/models/test_llama.py @@ -5,22 +5,29 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. + import logging import os import sys -import unittest + +from typing import Tuple import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import ArmTester +from executorch.backends.arm.test import conftest +from executorch.backends.arm.test.tester.test_pipeline import ( + TosaPipelineBI, + TosaPipelineMI, +) + from executorch.examples.models.llama.export_llama_lib import ( build_args_parser, get_llama_model, ) +input_t = Tuple[torch.Tensor] # Add project dir to sys path to workaround importlib.import_module() conditions in model_factory.py this_files_dir = os.path.dirname(os.path.abspath(__file__)) @@ -30,7 +37,7 @@ logger = logging.getLogger(__name__) -class TestLlama(unittest.TestCase): +class TestLlama: """ Test class of Llama models. Type of Llama model depends on command line parameters: --llama_inputs @@ -39,7 +46,6 @@ class TestLlama(unittest.TestCase): """ def prepare_model(self): - checkpoint = None params_file = None usage = "To run use --llama_inputs <.pt/.pth> <.json> " @@ -88,53 +94,47 @@ def prepare_model(self): return llama_model, llama_inputs, llama_meta - def test_llama_tosa_MI(self): - llama_model, llama_inputs, llama_meta = self.prepare_model() - - if llama_model is None or llama_inputs is None: - pytest.skip("Missing model and/or input files") - with torch.no_grad(): - ( - ArmTester( - llama_model, - example_inputs=llama_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - constant_methods=llama_meta, - ) - .export() - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs( - inputs=llama_inputs, - atol=4.3, - rtol=1.1, # TODO: MLETORCH-825 decrease tolerance - ) - ) - - def test_llama_tosa_BI(self): - llama_model, llama_inputs, llama_meta = self.prepare_model() - - if llama_model is None or llama_inputs is None: - pytest.skip("Missing model and/or input files") - - with torch.no_grad(): - ( - ArmTester( - llama_model, - example_inputs=llama_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - constant_methods=llama_meta, - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs( - inputs=llama_inputs, - atol=9.9, - rtol=1.5, # TODO: Tolerance needs to be updated after MLETORCH-907 - ) - ) +def test_llama_tosa_MI(): + llama_model, llama_inputs, llama_meta = TestLlama().prepare_model() + + if llama_model is None or llama_inputs is None: + pytest.skip("Missing model and/or input files") + + with torch.no_grad(): + pipeline = TosaPipelineMI[input_t]( + llama_model, + llama_inputs, + aten_op=[], + exir_op=[], + use_to_edge_transform_and_lower=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", + atol=4.3, + rtol=1.1, # TODO: MLETORCH-825 decrease tolerance + ) + 
pipeline.run()
+
+
+@pytest.mark.xfail(reason="KeyError: scalar_tensor_1 (MLETORCH-907)")
+def test_llama_tosa_BI():
+    llama_model, llama_inputs, llama_meta = TestLlama().prepare_model()
+
+    if llama_model is None or llama_inputs is None:
+        pytest.skip("Missing model and/or input files")
+
+    with torch.no_grad():
+        pipeline = TosaPipelineBI[input_t](
+            llama_model,
+            llama_inputs,
+            aten_op=[],
+            exir_op=[],
+            use_to_edge_transform_and_lower=True,
+        )
+        pipeline.change_args(
+            "run_method_and_compare_outputs",
+            atol=9.9,
+            rtol=1.5,  # TODO: Tolerance needs to be updated after MLETORCH-907
+        )
+        pipeline.run()
diff --git a/backends/arm/test/models/test_lstm_arm.py b/backends/arm/test/models/test_lstm_arm.py
index 1304de7eae1..48d2e918ff6 100644
--- a/backends/arm/test/models/test_lstm_arm.py
+++ b/backends/arm/test/models/test_lstm_arm.py
@@ -1,20 +1,24 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
 # Copyright 2025 Arm Limited and/or its affiliates.
-# All rights reserved.
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
-
-import unittest
+from typing import Tuple
 
 import torch
 
-from executorch.backends.arm.test import common, conftest
-from executorch.backends.arm.test.tester.arm_tester import ArmTester
+from executorch.backends.arm.test import common
+from executorch.backends.arm.test.tester.test_pipeline import (
+    EthosU55PipelineBI,
+    EthosU85PipelineBI,
+    TosaPipelineBI,
+    TosaPipelineMI,
+)
 
 from torch.nn.quantizable.modules import rnn
 
+input_t = Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]  # Input x, (h0, c0)
+
 
 def get_test_inputs():
     return (
@@ -23,7 +27,7 @@ def get_test_inputs():
     )
 
 
-class TestLSTM(unittest.TestCase):
+class TestLSTM:
     """Tests quantizable LSTM module."""
 
     """
@@ -37,69 +41,60 @@ class TestLSTM(unittest.TestCase):
 
     # Used e.g. 
for quantization calibration and shape extraction in the tester model_example_inputs = get_test_inputs() - def test_lstm_tosa_MI(self): - ( - ArmTester( - self.lstm, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=get_test_inputs()) - ) - - def test_lstm_tosa_BI(self): - ( - ArmTester( - self.lstm, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(atol=3e-1, qtol=1, inputs=get_test_inputs()) - ) - - def test_lstm_u55_BI(self): - tester = ( - ArmTester( - self.lstm, - example_inputs=self.model_example_inputs, - compile_spec=common.get_u55_compile_spec(), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs( - atol=3e-1, qtol=1, inputs=get_test_inputs() - ) - - def test_lstm_u85_BI(self): - tester = ( - ArmTester( - self.lstm, - example_inputs=self.model_example_inputs, - compile_spec=common.get_u85_compile_spec(), - ) - .quantize() - .export() - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .serialize() - ) - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs( - atol=3e-1, qtol=1, inputs=get_test_inputs() - ) + +def test_lstm_tosa_MI(): + pipeline = TosaPipelineMI[input_t]( + TestLSTM.lstm, + TestLSTM.model_example_inputs, + aten_op=[], + exir_op=[], + use_to_edge_transform_and_lower=True, + ) + pipeline.change_args("run_method_and_compare_outputs", get_test_inputs(), atol=3e-1) + pipeline.run() + + +def test_lstm_tosa_BI(): + pipeline = TosaPipelineBI[input_t]( + TestLSTM.lstm, + TestLSTM.model_example_inputs, + aten_op=[], + exir_op=[], + use_to_edge_transform_and_lower=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0 + ) + pipeline.run() + + +@common.XfailIfNoCorstone300 +def test_lstm_u55_BI(): + pipeline = EthosU55PipelineBI[input_t]( + TestLSTM.lstm, + TestLSTM.model_example_inputs, + aten_ops=[], + exir_ops=[], + use_to_edge_transform_and_lower=True, + run_on_fvp=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0 + ) + pipeline.run() + + +@common.XfailIfNoCorstone320 +def test_lstm_u85_BI(): + pipeline = EthosU85PipelineBI[input_t]( + TestLSTM.lstm, + TestLSTM.model_example_inputs, + aten_ops=[], + exir_ops=[], + use_to_edge_transform_and_lower=True, + run_on_fvp=True, + ) + pipeline.change_args( + "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0 + ) + pipeline.run() diff --git a/backends/arm/test/models/test_w2l_arm.py b/backends/arm/test/models/test_w2l_arm.py index 8cd2ff22b75..1a755937482 100644 --- a/backends/arm/test/models/test_w2l_arm.py +++ b/backends/arm/test/models/test_w2l_arm.py @@ -11,12 +11,18 @@ import pytest import torch -from executorch.backends.arm.test import common, conftest -from executorch.backends.arm.test.tester.arm_tester import 
ArmTester +from executorch.backends.arm.test import common +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU55PipelineBI, + EthosU85PipelineBI, + TosaPipelineBI, + TosaPipelineMI, +) -from executorch.exir.backend.compile_spec_schema import CompileSpec from torchaudio import models +input_t = Tuple[torch.Tensor] # Input x + def get_test_inputs(batch_size, num_features, input_frames): return (torch.randn(batch_size, num_features, input_frames),) @@ -32,114 +38,66 @@ class TestW2L(unittest.TestCase): w2l = models.Wav2Letter(num_features=num_features).eval() model_example_inputs = get_test_inputs(batch_size, num_features, input_frames) - all_operators = { + all_operators = [ "executorch_exir_dialects_edge__ops_aten_convolution_default", "executorch_exir_dialects_edge__ops_aten__log_softmax_default", "executorch_exir_dialects_edge__ops_aten_relu_default", - } - - operators_after_quantization = all_operators - { - "executorch_exir_dialects_edge__ops_aten__log_softmax_default", - } - - @pytest.mark.slow # about 3min on std laptop - def test_w2l_tosa_MI(self): - ( - ArmTester( - self.w2l, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .dump_operator_distribution() - .to_edge_transform_and_lower() - .dump_operator_distribution() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs( - inputs=get_test_inputs( - self.batch_size, self.num_features, self.input_frames - ) - ) - ) - - @pytest.mark.slow # about 1min on std laptop - def test_w2l_tosa_BI(self): - ( - ArmTester( - self.w2l, - example_inputs=self.model_example_inputs, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"), - ) - .quantize() - .export() - .dump_operator_distribution() - .to_edge_transform_and_lower() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs( - atol=0.1, - qtol=1, - inputs=get_test_inputs( - self.batch_size, self.num_features, self.input_frames - ), - ) - ) - - def _test_w2l_ethos_BI_pipeline( - self, - module: torch.nn.Module, - test_data: Tuple[torch.Tensor], - compile_spec: CompileSpec, - ): - tester = ( - ArmTester(module, example_inputs=test_data, compile_spec=compile_spec) - .quantize() - .export() - .to_edge() - .check(list(self.operators_after_quantization)) - .partition() - .to_executorch() - .serialize() - ) - return tester - - # TODO: expected fail as TOSA.Transpose is not supported by Ethos-U55 - @pytest.mark.slow - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP - def test_w2l_u55_BI(self): - tester = self._test_w2l_ethos_BI_pipeline( - self.w2l, - self.model_example_inputs, - common.get_u55_compile_spec(), - ) - - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs( - atol=1.0, - qtol=1, - inputs=get_test_inputs( - self.batch_size, self.num_features, self.input_frames - ), - ) - - @pytest.mark.slow - @pytest.mark.corstone_fvp - @conftest.expectedFailureOnFVP # TODO: MLETORCH-761 - @pytest.mark.skip(reason="Intermittent timeout issue: MLETORCH-856") - def test_w2l_u85_BI(self): - tester = self._test_w2l_ethos_BI_pipeline( - self.w2l, - self.model_example_inputs, - common.get_u85_compile_spec(), - ) - - if conftest.is_option_enabled("corstone_fvp"): - tester.run_method_and_compare_outputs( - atol=1.0, - qtol=1, - inputs=get_test_inputs( - self.batch_size, self.num_features, self.input_frames - ), - ) + ] + + 
+@pytest.mark.slow # about 3min on std laptop +def test_w2l_tosa_MI(): + pipeline = TosaPipelineMI[input_t]( + TestW2L.w2l, + TestW2L.model_example_inputs, + aten_op=[], + exir_op=TestW2L.all_operators, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@pytest.mark.slow # about 1min on std laptop +@pytest.mark.flaky +def test_w2l_tosa_BI(): + pipeline = TosaPipelineBI[input_t]( + TestW2L.w2l, + TestW2L.model_example_inputs, + aten_op=[], + exir_op=TestW2L.all_operators, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@pytest.mark.slow +@common.XfailIfNoCorstone300 +@pytest.mark.xfail( + reason="MLETORCH-1009: Wav2Letter fails on U55 due to unsupported conditions", + strict=False, +) +def test_w2l_u55_BI(): + pipeline = EthosU55PipelineBI[input_t]( + TestW2L.w2l, + TestW2L.model_example_inputs, + aten_ops=[], + exir_ops=[], + use_to_edge_transform_and_lower=True, + run_on_fvp=True, + ) + pipeline.run() + + +@pytest.mark.slow +@common.XfailIfNoCorstone320 +@pytest.mark.skip(reason="Intermittent timeout issue: MLETORCH-856") +def test_w2l_u85_BI(): + pipeline = EthosU85PipelineBI[input_t]( + TestW2L.w2l, + TestW2L.model_example_inputs, + aten_ops=[], + exir_ops=[], + use_to_edge_transform_and_lower=True, + run_on_fvp=True, + ) + pipeline.run()
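

Note: every file above follows the same migration pattern: build a typed pipeline (TosaPipelineMI/TosaPipelineBI for TOSA, EthosU55PipelineBI/EthosU85PipelineBI for FVP targets), optionally adjust the output-comparison stage with change_args, then run. A minimal sketch of that pattern for a hypothetical new model test follows; MyModule, its example input, and the tolerances are illustrative placeholders, while the pipeline class and its methods are the ones imported from executorch.backends.arm.test.tester.test_pipeline in the diffs above.

from typing import Tuple

import torch

from executorch.backends.arm.test.tester.test_pipeline import TosaPipelineMI

input_t = Tuple[torch.Tensor]  # single input tensor


class MyModule(torch.nn.Module):  # hypothetical stand-in model
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.relu(x)


def test_mymodule_tosa_MI():
    pipeline = TosaPipelineMI[input_t](
        MyModule().eval(),
        (torch.randn(1, 16),),  # example inputs for export and calibration
        aten_op=[],  # operator-name checks; the tests above pass [] when none apply
        exir_op=[],
        use_to_edge_transform_and_lower=True,
    )
    # Override only the comparison stage, e.g. to loosen tolerances or to feed
    # different inputs than the export examples (cf. the Conformer tests above).
    pipeline.change_args("run_method_and_compare_outputs", atol=1.0, rtol=1.0)
    pipeline.run()

As in the Conformer BI test, a default stage can also be dropped when it does not apply to the model, e.g. pipeline.pop_stage("check_count.exir").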