11 changes: 1 addition & 10 deletions backends/arm/test/models/test_conformer.py
@@ -136,18 +136,9 @@ def test_conformer_vgf_INT():
exir_op=[],
tosa_version="TOSA-1.0+INT",
use_to_edge_transform_and_lower=True,
run_on_vulkan_runtime=False, # TODO: run on vulkan runtime
)
pipeline.pop_stage("check_count.exir")

# TODO: MLETORCH-1167 Create Vulkan backend e2e tests
# pipeline.change_args(
# "run_method_and_compare_outputs",
# get_test_inputs(
# TestConformer.dim, TestConformer.lengths, TestConformer.num_examples
# ),
# rtol=1.0,
# atol=3.0,
# )
pipeline.run()


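The model tests in this file and in the dl3, lstm, and mobilenet_v2 files below all receive the same cleanup: the disabled run_on_vulkan_runtime=False argument is dropped and the commented-out MLETORCH-1167 comparison blocks are deleted, presumably leaving the pipeline's default runtime behaviour in effect. A minimal sketch of how such a test body reads after the change is shown below; the VgfPipeline import path comes from other files in this diff, while input_t, model, example_inputs, and aten_ops are illustrative placeholders for the per-test definitions.

# Sketch only: input_t, model, example_inputs, and aten_ops stand in for
# values defined elsewhere in each test file.
from executorch.backends.arm.test.tester.test_pipeline import VgfPipeline


def test_model_vgf_INT():
    pipeline = VgfPipeline[input_t](
        model,  # module under test
        example_inputs,  # example inputs for tracing and output comparison
        aten_ops,  # expected ATen ops
        exir_op=[],
        tosa_version="TOSA-1.0+INT",
        use_to_edge_transform_and_lower=True,
        # run_on_vulkan_runtime is no longer passed; the default applies
    )
    pipeline.run()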
9 changes: 1 addition & 8 deletions backends/arm/test/models/test_dl3_arm.py
@@ -99,11 +99,8 @@ def test_dl3_vgf_INT():
exir_op=[],
tosa_version="TOSA-1.0+INT",
use_to_edge_transform_and_lower=True,
run_on_vulkan_runtime=False, # TODO: run on vulkan runtime
)
# TODO: MLETORCH-1167 Create Vulkan backend e2e tests
# pipeline.change_args(
# "run_method_and_compare_outputs", rtol=1.0, atol=1.0
# )
pipeline.run()


@@ -117,8 +114,4 @@ def test_dl3_vgf_FP():
tosa_version="TOSA-1.0+FP",
use_to_edge_transform_and_lower=True,
)
# TODO: MLETORCH-1167 Create Vulkan backend e2e tests
# pipeline.change_args(
# "run_method_and_compare_outputs", rtol=1.0, atol=1.0
# )
pipeline.run()
8 changes: 0 additions & 8 deletions backends/arm/test/models/test_lstm_arm.py
@@ -111,10 +111,6 @@ def test_lstm_vgf_INT():
tosa_version="TOSA-1.0+INT",
use_to_edge_transform_and_lower=True,
)
# TODO: MLETORCH-1167 Create Vulkan backend e2e tests
# pipeline.change_args(
# "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
# )
pipeline.run()


@@ -128,8 +124,4 @@ def test_lstm_vgf_FP():
tosa_version="TOSA-1.0+FP",
use_to_edge_transform_and_lower=True,
)
# TODO: MLETORCH-1167 Create Vulkan backend e2e tests
# pipeline.change_args(
# "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
# )
pipeline.run()
10 changes: 2 additions & 8 deletions backends/arm/test/models/test_mobilenet_v2_arm.py
@@ -127,11 +127,8 @@ def test_mv2_vgf_INT(per_channel_quantization):
per_channel_quantization=per_channel_quantization,
atol=0.25,
qtol=1,
run_on_vulkan_runtime=False,
)
# TODO: MLETORCH-1167 Create Vulkan backend e2e tests
# pipeline.change_args(
# "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
# )
pipeline.run()


@@ -144,9 +141,6 @@ def test_mv2_vgf_FP():
exir_op=[],
tosa_version="TOSA-1.0+FP",
use_to_edge_transform_and_lower=True,
run_on_vulkan_runtime=False,
)
# TODO: MLETORCH-1167 Create Vulkan backend e2e tests
# pipeline.change_args(
# "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
# ) # TODO: MLETORCH-1036 decrease tolerance
pipeline.run()
2 changes: 2 additions & 0 deletions backends/arm/test/ops/test_addmm.py
@@ -167,6 +167,7 @@ def test_addmm_u85_INT(test_data: Tuple):

@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_addmm_vgf_FP(test_data: input_t1):
pipeline = VgfPipeline[input_t1](
Addmm(),
@@ -180,6 +181,7 @@ def test_addmm_vgf_FP(test_data: input_t1):

@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_addmm_vgf_INT(test_data: input_t1):
pipeline = VgfPipeline[input_t1](
Addmm(),
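From this file onward, the VGF op tests gain a pytest.mark.xfail marker alongside @common.SkipIfNoModelConverter, with the tracking ticket in the reason string (MLETORCH-1410 for the zero-dimension failures, MLETORCH-1387 for the SiLU output mismatch). The standalone sketch below shows how such a marker behaves: the decorated test is reported as XFAIL instead of FAILED while the issue is open, and as XPASS once it starts passing (the marker is non-strict by default), which signals that the marker can be removed. Only the example test function is invented for illustration; the decorator usage is standard pytest.

# Standalone illustration of the xfail marker; run with: pytest -rxX example_test.py
import pytest


@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_zero_dim_tensor_example():
    # Hypothetical stand-in for a VGF pipeline run that currently fails.
    raise NotImplementedError("zero-dimensional tensors are not handled yet")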
4 changes: 4 additions & 0 deletions backends/arm/test/ops/test_amax.py
@@ -140,6 +140,7 @@ def test_max_dim_tosa_FP_not_delegated():

@common.parametrize("test_data", Amax.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_amax_vgf_FP(test_data: Amax.input_t):
data, dim, keep_dims = test_data()
module = Amax(dim, keep_dims)
@@ -154,6 +155,7 @@ def test_amax_vgf_FP(test_data: Amax.input_t):

@common.parametrize("test_data", Amax.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_amax_vgf_INT(test_data: Amax.input_t):
data, dim, keep_dims = test_data()
module = Amax(dim, keep_dims)
@@ -168,6 +170,7 @@ def test_amax_vgf_INT(test_data: Amax.input_t):

@common.parametrize("test_data", Max.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_max_dim_vgf_FP_to_amax(test_data: Max.input_t):
data, dim = test_data()
pipeline = VgfPipeline[Max.input_t](
@@ -181,6 +184,7 @@ def test_max_dim_vgf_FP_to_amax(test_data: Max.input_t):

@common.parametrize("test_data", Max.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_max_dim_vgf_INT_to_amax(test_data: Max.input_t):
data, dim = test_data()
pipeline = VgfPipeline[Max.input_t](
4 changes: 4 additions & 0 deletions backends/arm/test/ops/test_amin.py
@@ -152,6 +152,7 @@ def test_min_dim_tosa_FP_not_delegated():

@common.parametrize("test_data", Amin.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_amin_vgf_FP(test_data: Amin.input_t):
data, dim, keep_dims = test_data()
pipeline = VgfPipeline[Amin.input_t](
@@ -162,6 +163,7 @@ def test_amin_vgf_FP(test_data: Amin.input_t):

@common.parametrize("test_data", Amin.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_amin_vgf_INT(test_data: Amin.input_t):
data, dim, keep_dims = test_data()
pipeline = VgfPipeline[Amin.input_t](
@@ -175,6 +177,7 @@ def test_amin_vgf_INT(test_data: Amin.input_t):

@common.parametrize("test_data", Min.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_min_dim_vgf_FP_to_amin(test_data: Min.input_t):
data, dim = test_data()
pipeline = VgfPipeline[Min.input_t](
@@ -188,6 +191,7 @@ def test_min_dim_vgf_FP_to_amin(test_data: Min.input_t):

@common.parametrize("test_data", Min.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_min_dim_vgf_INT_to_amin(test_data: Min.input_t):
data, dim = test_data()
pipeline = VgfPipeline[Min.input_t](
3 changes: 3 additions & 0 deletions backends/arm/test/ops/test_any.py
@@ -6,6 +6,7 @@

from typing import List, Tuple

import pytest
import torch
from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import (
@@ -189,6 +190,7 @@ def test_any_u85_INT(test_data: input_t1):

@common.parametrize("test_data", test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_any_vgf_FP(test_data: input_t1):
op, data_fn = test_data()
pipeline = VgfPipeline[input_t1](
@@ -203,6 +205,7 @@ def test_any_vgf_FP(test_data: input_t1):

@common.parametrize("test_data", test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_any_vgf_INT(test_data: input_t1):
op, data_fn = test_data()
pipeline = VgfPipeline[input_t1](
2 changes: 0 additions & 2 deletions backends/arm/test/ops/test_bmm.py
@@ -186,6 +186,4 @@ def test_bmm_vgf_INT_single_input(test_data: input_t1):
exir_op_bmm,
tosa_version="TOSA-1.0+INT",
)
# TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
# pipeline.change_args("run_method_and_compare_outputs", qtol=1)
pipeline.run()
2 changes: 0 additions & 2 deletions backends/arm/test/ops/test_clamp.py
@@ -149,6 +149,4 @@ def test_clamp_vgf_INT(test_data):
exir_op,
tosa_version="TOSA-1.0+INT",
)
# TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
# pipeline.change_args("run_method_and_compare_outputs", qtol=1)
pipeline.run()
2 changes: 0 additions & 2 deletions backends/arm/test/ops/test_conv_combos.py
@@ -581,8 +581,6 @@ def test_convolution_2d_vgf_INT_block_bottleneck(test_data):
tosa_version="TOSA-1.0+INT",
per_channel_quantization=per_channel_quantization,
)
# TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
# pipeline.change_args("run_method_and_compare_outputs", model.get_inputs(), qtol=1)
pipeline.run()


4 changes: 0 additions & 4 deletions backends/arm/test/ops/test_index_select.py
@@ -174,8 +174,4 @@ def test_index_select_vgf_INT_rand(test_data: input_params):
op.exir_op,
tosa_version="TOSA-1.0+INT",
)
# TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
# pipeline.change_args(
# "run_method_and_compare_outputs", inputs=test_input, atol=0.9, rtol=0.2, qtol=1
# )
pipeline.run()
2 changes: 0 additions & 2 deletions backends/arm/test/ops/test_logsoftmax.py
@@ -119,6 +119,4 @@ def test_log_softmax_vgf_INT(test_data):
tosa_version="TOSA-1.0+INT",
)
pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op])
# TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
# pipeline.change_args("run_method_and_compare_outputs", qtol=1)
pipeline.run()
6 changes: 5 additions & 1 deletion backends/arm/test/ops/test_mean_dim.py
@@ -4,7 +4,7 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import pytest
import torch
from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import (
@@ -86,6 +86,7 @@ def test_adaptive_avg_pool2d_u85_INT(test_data):

@common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_adaptive_avg_pool2d_vgf_FP(test_data):
pipeline = VgfPipeline[input_t](
AdaptiveAveragePool2d(),
@@ -99,6 +100,7 @@ def test_adaptive_avg_pool2d_vgf_FP(test_data):

@common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_adaptive_avg_pool2d_vgf_INT(test_data):
pipeline = VgfPipeline[input_t](
AdaptiveAveragePool2d(),
@@ -329,6 +331,7 @@ def test_mean_dim_u85_INT(test_data):

@common.parametrize("test_data", MeanDim.test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_mean_dim_vgf_FP(test_data):
test_data_val, dim, keep_dim = test_data()
pipeline = VgfPipeline[input_t](
@@ -343,6 +346,7 @@ def test_mean_dim_vgf_FP(test_data):

@common.parametrize("test_data", MeanDim.test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_mean_dim_vgf_INT(test_data):
test_data_val, dim, keep_dim = test_data()
pipeline = VgfPipeline[input_t](
4 changes: 3 additions & 1 deletion backends/arm/test/ops/test_scalar_tensor.py
@@ -2,7 +2,7 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import pytest
import torch
from executorch.backends.arm.test import common

@@ -104,6 +104,7 @@ def test_scalar_tensor_u85_INT(test_data):

@common.parametrize("test_data", float_test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_scalar_tensor_vgf_FP(test_data):
scalar, dtype, data = test_data()
pipeline = VgfPipeline(
@@ -117,6 +118,7 @@ def test_scalar_tensor_vgf_FP(test_data):

@common.parametrize("test_data", int_test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_scalar_tensor_vgf_INT(test_data):
scalar, dtype, data = test_data()
pipeline = VgfPipeline(
5 changes: 5 additions & 0 deletions backends/arm/test/ops/test_select.py
@@ -7,6 +7,7 @@

from typing import Tuple

import pytest
import torch

from executorch.backends.arm.test import common
@@ -173,6 +174,7 @@ def test_select_int_u85_INT(test_data: Tuple):

@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_select_int_vgf_FP_copy(test_data: Tuple):
pipeline = VgfPipeline[input_t1](
SelectCopy(), test_data(), aten_op_copy, [], tosa_version="TOSA-1.0+FP"
@@ -182,6 +184,7 @@ def test_select_int_vgf_FP_copy(test_data: Tuple):

@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_select_int_vgf_FP(test_data: Tuple):
pipeline = VgfPipeline[input_t1](
SelectInt(), test_data(), aten_op_int, [], tosa_version="TOSA-1.0+FP"
@@ -191,6 +194,7 @@ def test_select_int_vgf_FP(test_data: Tuple):

@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_select_int_vgf_INT_copy(test_data: Tuple):
pipeline = VgfPipeline[input_t1](
SelectCopy(),
@@ -204,6 +208,7 @@ def test_select_int_vgf_INT_copy(test_data: Tuple):

@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_select_int_vgf_INT(test_data: Tuple):
pipeline = VgfPipeline[input_t1](
SelectInt(),
5 changes: 5 additions & 0 deletions backends/arm/test/ops/test_silu.py
@@ -8,6 +8,7 @@

from typing import Optional, Tuple

import pytest
import torch
from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import (
@@ -116,6 +117,7 @@ def test_silu_u85_INT(test_data: input_t):

@common.parametrize("test_data", Silu.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
def test_silu_vgf_FP(test_data: input_t):
silu_data = (test_data(), False)
pipeline = VgfPipeline[input_t](
@@ -126,6 +128,7 @@ def test_silu_vgf_FP_inplace(test_data: input_t):

@common.parametrize("test_data", Silu.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
def test_silu_vgf_FP_inplace(test_data: input_t):
silu_data = (test_data(), True)
pipeline = VgfPipeline[input_t](
@@ -136,6 +139,7 @@ def test_silu_vgf_INT(test_data: input_t):

@common.parametrize("test_data", Silu.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
def test_silu_vgf_INT(test_data: input_t):
silu_data = (test_data(), False)
pipeline = VgfPipeline[input_t](
@@ -149,6 +153,7 @@ def test_silu_vgf_INT_inplace(test_data: input_t):

@common.parametrize("test_data", Silu.test_data)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
def test_silu_vgf_INT_inplace(test_data: input_t):
silu_data = (test_data(), True)
pipeline = VgfPipeline[input_t](