diff --git a/backends/arm/test/models/test_conformer.py b/backends/arm/test/models/test_conformer.py
index 3119145aef1..d92bbfec78b 100644
--- a/backends/arm/test/models/test_conformer.py
+++ b/backends/arm/test/models/test_conformer.py
@@ -136,18 +136,9 @@ def test_conformer_vgf_INT():
         exir_op=[],
         tosa_version="TOSA-1.0+INT",
         use_to_edge_transform_and_lower=True,
+        run_on_vulkan_runtime=False,  # TODO: run on vulkan runtime
     )
     pipeline.pop_stage("check_count.exir")
-
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs",
-    #     get_test_inputs(
-    #         TestConformer.dim, TestConformer.lengths, TestConformer.num_examples
-    #     ),
-    #     rtol=1.0,
-    #     atol=3.0,
-    # )
     pipeline.run()
 
 
diff --git a/backends/arm/test/models/test_dl3_arm.py b/backends/arm/test/models/test_dl3_arm.py
index 2000ac34794..09400143d3f 100644
--- a/backends/arm/test/models/test_dl3_arm.py
+++ b/backends/arm/test/models/test_dl3_arm.py
@@ -99,11 +99,8 @@ def test_dl3_vgf_INT():
         exir_op=[],
         tosa_version="TOSA-1.0+INT",
         use_to_edge_transform_and_lower=True,
+        run_on_vulkan_runtime=False,  # TODO: run on vulkan runtime
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", rtol=1.0, atol=1.0
-    # )
     pipeline.run()
 
 
@@ -117,8 +114,4 @@ def test_dl3_vgf_FP():
         tosa_version="TOSA-1.0+FP",
         use_to_edge_transform_and_lower=True,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", rtol=1.0, atol=1.0
-    # )
     pipeline.run()
diff --git a/backends/arm/test/models/test_lstm_arm.py b/backends/arm/test/models/test_lstm_arm.py
index 1e63472f5f4..91427d18b9b 100644
--- a/backends/arm/test/models/test_lstm_arm.py
+++ b/backends/arm/test/models/test_lstm_arm.py
@@ -111,10 +111,6 @@ def test_lstm_vgf_INT():
         tosa_version="TOSA-1.0+INT",
         use_to_edge_transform_and_lower=True,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )
    pipeline.run()
 
 
@@ -128,8 +124,4 @@ def test_lstm_vgf_FP():
         tosa_version="TOSA-1.0+FP",
         use_to_edge_transform_and_lower=True,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )
     pipeline.run()
diff --git a/backends/arm/test/models/test_mobilenet_v2_arm.py b/backends/arm/test/models/test_mobilenet_v2_arm.py
index 84de432155e..43c2148f129 100644
--- a/backends/arm/test/models/test_mobilenet_v2_arm.py
+++ b/backends/arm/test/models/test_mobilenet_v2_arm.py
@@ -127,11 +127,8 @@ def test_mv2_vgf_INT(per_channel_quantization):
         per_channel_quantization=per_channel_quantization,
         atol=0.25,
         qtol=1,
+        run_on_vulkan_runtime=False,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )
     pipeline.run()
 
 
@@ -144,9 +141,6 @@ def test_mv2_vgf_FP():
         exir_op=[],
         tosa_version="TOSA-1.0+FP",
         use_to_edge_transform_and_lower=True,
+        run_on_vulkan_runtime=False,
     )
-    # TODO: MLETORCH-1167 Create Vulkan backend e2e tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", get_test_inputs(), atol=3e-1, qtol=1.0
-    # )  # TODO: MLETORCH-1036 decrease tolerance
     pipeline.run()
diff --git a/backends/arm/test/ops/test_addmm.py b/backends/arm/test/ops/test_addmm.py
index 753cb599b2b..90d780dc490 100644
--- a/backends/arm/test/ops/test_addmm.py
+++ b/backends/arm/test/ops/test_addmm.py
@@ -167,6 +167,7 @@ def test_addmm_u85_INT(test_data: Tuple):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_addmm_vgf_FP(test_data: input_t1):
     pipeline = VgfPipeline[input_t1](
         Addmm(),
@@ -180,6 +181,7 @@ def test_addmm_vgf_FP(test_data: input_t1):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_addmm_vgf_INT(test_data: input_t1):
     pipeline = VgfPipeline[input_t1](
         Addmm(),
diff --git a/backends/arm/test/ops/test_amax.py b/backends/arm/test/ops/test_amax.py
index 080dddda92e..5a0ca1f8778 100644
--- a/backends/arm/test/ops/test_amax.py
+++ b/backends/arm/test/ops/test_amax.py
@@ -140,6 +140,7 @@ def test_max_dim_tosa_FP_not_delegated():
 
 @common.parametrize("test_data", Amax.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amax_vgf_FP(test_data: Amax.input_t):
     data, dim, keep_dims = test_data()
     module = Amax(dim, keep_dims)
@@ -154,6 +155,7 @@ def test_amax_vgf_FP(test_data: Amax.input_t):
 
 @common.parametrize("test_data", Amax.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amax_vgf_INT(test_data: Amax.input_t):
     data, dim, keep_dims = test_data()
     module = Amax(dim, keep_dims)
@@ -168,6 +170,7 @@ def test_amax_vgf_INT(test_data: Amax.input_t):
 
 @common.parametrize("test_data", Max.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_max_dim_vgf_FP_to_amax(test_data: Max.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Max.input_t](
@@ -181,6 +184,7 @@ def test_max_dim_vgf_FP_to_amax(test_data: Max.input_t):
 
 @common.parametrize("test_data", Max.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_max_dim_vgf_INT_to_amax(test_data: Max.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Max.input_t](
diff --git a/backends/arm/test/ops/test_amin.py b/backends/arm/test/ops/test_amin.py
index a24da9e1ba0..183d43da585 100644
--- a/backends/arm/test/ops/test_amin.py
+++ b/backends/arm/test/ops/test_amin.py
@@ -152,6 +152,7 @@ def test_min_dim_tosa_FP_not_delegated():
 
 @common.parametrize("test_data", Amin.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amin_vgf_FP(test_data: Amin.input_t):
     data, dim, keep_dims = test_data()
     pipeline = VgfPipeline[Amin.input_t](
@@ -162,6 +163,7 @@ def test_amin_vgf_FP(test_data: Amin.input_t):
 
 @common.parametrize("test_data", Amin.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_amin_vgf_INT(test_data: Amin.input_t):
     data, dim, keep_dims = test_data()
     pipeline = VgfPipeline[Amin.input_t](
@@ -175,6 +177,7 @@ def test_amin_vgf_INT(test_data: Amin.input_t):
 
 @common.parametrize("test_data", Min.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_min_dim_vgf_FP_to_amin(test_data: Min.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Min.input_t](
@@ -188,6 +191,7 @@ def test_min_dim_vgf_FP_to_amin(test_data: Min.input_t):
 
 @common.parametrize("test_data", Min.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_min_dim_vgf_INT_to_amin(test_data: Min.input_t):
     data, dim = test_data()
     pipeline = VgfPipeline[Min.input_t](
diff --git a/backends/arm/test/ops/test_any.py b/backends/arm/test/ops/test_any.py
index ae738480048..8c0c9eed54c 100644
--- a/backends/arm/test/ops/test_any.py
+++ b/backends/arm/test/ops/test_any.py
@@ -6,6 +6,7 @@
 
 from typing import List, Tuple
 
+import pytest
 import torch
 from executorch.backends.arm.test import common
 from executorch.backends.arm.test.tester.test_pipeline import (
@@ -189,6 +190,7 @@ def test_any_u85_INT(test_data: input_t1):
 
 @common.parametrize("test_data", test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_any_vgf_FP(test_data: input_t1):
     op, data_fn = test_data()
     pipeline = VgfPipeline[input_t1](
@@ -203,6 +205,7 @@ def test_any_vgf_FP(test_data: input_t1):
 
 @common.parametrize("test_data", test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_any_vgf_INT(test_data: input_t1):
     op, data_fn = test_data()
     pipeline = VgfPipeline[input_t1](
diff --git a/backends/arm/test/ops/test_bmm.py b/backends/arm/test/ops/test_bmm.py
index 7c0fc1665bb..9e09414e29b 100644
--- a/backends/arm/test/ops/test_bmm.py
+++ b/backends/arm/test/ops/test_bmm.py
@@ -186,6 +186,4 @@ def test_bmm_vgf_INT_single_input(test_data: input_t1):
         exir_op_bmm,
         tosa_version="TOSA-1.0+INT",
     )
-    # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
-    # pipeline.change_args("run_method_and_compare_outputs", qtol=1)
     pipeline.run()
diff --git a/backends/arm/test/ops/test_clamp.py b/backends/arm/test/ops/test_clamp.py
index ba490ccc0c6..68c32e8fcc6 100644
--- a/backends/arm/test/ops/test_clamp.py
+++ b/backends/arm/test/ops/test_clamp.py
@@ -149,6 +149,4 @@ def test_clamp_vgf_INT(test_data):
         exir_op,
         tosa_version="TOSA-1.0+INT",
     )
-    # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
-    # pipeline.change_args("run_method_and_compare_outputs", qtol=1)
     pipeline.run()
diff --git a/backends/arm/test/ops/test_conv_combos.py b/backends/arm/test/ops/test_conv_combos.py
index a7a031468ea..f57de4eeb21 100644
--- a/backends/arm/test/ops/test_conv_combos.py
+++ b/backends/arm/test/ops/test_conv_combos.py
@@ -581,8 +581,6 @@ def test_convolution_2d_vgf_INT_block_bottleneck(test_data):
         tosa_version="TOSA-1.0+INT",
         per_channel_quantization=per_channel_quantization,
     )
-    # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
-    # pipeline.change_args("run_method_and_compare_outputs", model.get_inputs(), qtol=1)
     pipeline.run()
 
 
diff --git a/backends/arm/test/ops/test_index_select.py b/backends/arm/test/ops/test_index_select.py
index 95ebaa62a38..6d2a6d73b70 100644
--- a/backends/arm/test/ops/test_index_select.py
+++ b/backends/arm/test/ops/test_index_select.py
@@ -174,8 +174,4 @@ def test_index_select_vgf_INT_rand(test_data: input_params):
         op.exir_op,
         tosa_version="TOSA-1.0+INT",
     )
-    # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
-    # pipeline.change_args(
-    #     "run_method_and_compare_outputs", inputs=test_input, atol=0.9, rtol=0.2, qtol=1
-    # )
     pipeline.run()
diff --git a/backends/arm/test/ops/test_logsoftmax.py b/backends/arm/test/ops/test_logsoftmax.py
index c4a68caabac..502aa2f27c7 100644
--- a/backends/arm/test/ops/test_logsoftmax.py
+++ b/backends/arm/test/ops/test_logsoftmax.py
@@ -119,6 +119,4 @@ def test_log_softmax_vgf_INT(test_data):
         tosa_version="TOSA-1.0+INT",
     )
     pipeline.add_stage_after("quantize", pipeline.tester.check_not, [aten_op])
-    # TODO: MLETORCH-1136 Change args of run_method_and_compare_outputs of the vgf tests
-    # pipeline.change_args("run_method_and_compare_outputs", qtol=1)
     pipeline.run()
diff --git a/backends/arm/test/ops/test_mean_dim.py b/backends/arm/test/ops/test_mean_dim.py
index 061e8da14f1..0de2dd3af12 100644
--- a/backends/arm/test/ops/test_mean_dim.py
+++ b/backends/arm/test/ops/test_mean_dim.py
@@ -4,7 +4,7 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
-
+import pytest
 import torch
 from executorch.backends.arm.test import common
 from executorch.backends.arm.test.tester.test_pipeline import (
@@ -86,6 +86,7 @@ def test_adaptive_avg_pool2d_u85_INT(test_data):
 
 @common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_adaptive_avg_pool2d_vgf_FP(test_data):
     pipeline = VgfPipeline[input_t](
         AdaptiveAveragePool2d(),
@@ -99,6 +100,7 @@ def test_adaptive_avg_pool2d_vgf_FP(test_data):
 
 @common.parametrize("test_data", AdaptiveAveragePool2d.test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_adaptive_avg_pool2d_vgf_INT(test_data):
     pipeline = VgfPipeline[input_t](
         AdaptiveAveragePool2d(),
@@ -329,6 +331,7 @@ def test_mean_dim_u85_INT(test_data):
 
 @common.parametrize("test_data", MeanDim.test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_mean_dim_vgf_FP(test_data):
     test_data_val, dim, keep_dim = test_data()
     pipeline = VgfPipeline[input_t](
@@ -343,6 +346,7 @@ def test_mean_dim_vgf_FP(test_data):
 
 @common.parametrize("test_data", MeanDim.test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_mean_dim_vgf_INT(test_data):
     test_data_val, dim, keep_dim = test_data()
     pipeline = VgfPipeline[input_t](
diff --git a/backends/arm/test/ops/test_scalar_tensor.py b/backends/arm/test/ops/test_scalar_tensor.py
index 22c1cc0373d..b6f59b184a8 100644
--- a/backends/arm/test/ops/test_scalar_tensor.py
+++ b/backends/arm/test/ops/test_scalar_tensor.py
@@ -2,7 +2,7 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
-
+import pytest
 import torch
 
 from executorch.backends.arm.test import common
@@ -104,6 +104,7 @@ def test_scalar_tensor_u85_INT(test_data):
 
 @common.parametrize("test_data", float_test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_scalar_tensor_vgf_FP(test_data):
     scalar, dtype, data = test_data()
     pipeline = VgfPipeline(
@@ -117,6 +118,7 @@ def test_scalar_tensor_vgf_FP(test_data):
 
 @common.parametrize("test_data", int_test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_scalar_tensor_vgf_INT(test_data):
     scalar, dtype, data = test_data()
     pipeline = VgfPipeline(
diff --git a/backends/arm/test/ops/test_select.py b/backends/arm/test/ops/test_select.py
index 4c3887f1e18..e6f87826f59 100644
--- a/backends/arm/test/ops/test_select.py
+++ b/backends/arm/test/ops/test_select.py
@@ -7,6 +7,7 @@
 
 from typing import Tuple
 
+import pytest
 import torch
 
 from executorch.backends.arm.test import common
@@ -173,6 +174,7 @@ def test_select_int_u85_INT(test_data: Tuple):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_select_int_vgf_FP_copy(test_data: Tuple):
     pipeline = VgfPipeline[input_t1](
         SelectCopy(), test_data(), aten_op_copy, [], tosa_version="TOSA-1.0+FP"
@@ -182,6 +184,7 @@ def test_select_int_vgf_FP_copy(test_data: Tuple):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_select_int_vgf_FP(test_data: Tuple):
     pipeline = VgfPipeline[input_t1](
         SelectInt(), test_data(), aten_op_int, [], tosa_version="TOSA-1.0+FP"
@@ -191,6 +194,7 @@ def test_select_int_vgf_FP(test_data: Tuple):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_select_int_vgf_INT_copy(test_data: Tuple):
     pipeline = VgfPipeline[input_t1](
         SelectCopy(),
@@ -204,6 +208,7 @@ def test_select_int_vgf_INT_copy(test_data: Tuple):
 
 @common.parametrize("test_data", test_data_suite)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_select_int_vgf_INT(test_data: Tuple):
     pipeline = VgfPipeline[input_t1](
         SelectInt(),
diff --git a/backends/arm/test/ops/test_silu.py b/backends/arm/test/ops/test_silu.py
index edc7d769be1..e451c22adbb 100644
--- a/backends/arm/test/ops/test_silu.py
+++ b/backends/arm/test/ops/test_silu.py
@@ -8,6 +8,7 @@
 
 from typing import Optional, Tuple
 
+import pytest
 import torch
 from executorch.backends.arm.test import common
 from executorch.backends.arm.test.tester.test_pipeline import (
@@ -116,6 +117,7 @@ def test_silu_u85_INT_inplace(test_data: input_t):
 
 @common.parametrize("test_data", Silu.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
 def test_silu_vgf_FP(test_data: input_t):
     silu_data = (test_data(), False)
     pipeline = VgfPipeline[input_t](
@@ -126,6 +128,7 @@ def test_silu_vgf_FP(test_data: input_t):
 
 @common.parametrize("test_data", Silu.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
 def test_silu_vgf_FP_inplace(test_data: input_t):
     silu_data = (test_data(), True)
     pipeline = VgfPipeline[input_t](
@@ -136,6 +139,7 @@ def test_silu_vgf_FP_inplace(test_data: input_t):
 
 @common.parametrize("test_data", Silu.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
 def test_silu_vgf_INT(test_data: input_t):
     silu_data = (test_data(), False)
     pipeline = VgfPipeline[input_t](
@@ -149,6 +153,7 @@ def test_silu_vgf_INT(test_data: input_t):
 
 @common.parametrize("test_data", Silu.test_data)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1387: Output differs")
 def test_silu_vgf_INT_inplace(test_data: input_t):
     silu_data = (test_data(), True)
     pipeline = VgfPipeline[input_t](
diff --git a/backends/arm/test/ops/test_var.py b/backends/arm/test/ops/test_var.py
index 9567f90c480..244938dc6b0 100644
--- a/backends/arm/test/ops/test_var.py
+++ b/backends/arm/test/ops/test_var.py
@@ -6,6 +6,7 @@
 
 from typing import Tuple
 
+import pytest
 import torch
 
 from executorch.backends.arm.test import common
@@ -215,6 +216,7 @@ def test_var_dim_u85_INT_no_dim(test_data: Tuple):
 
 @common.parametrize("test_data", Var.test_parameters)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_var_dim_vgf_FP_no_dim(test_data: Tuple):
     data, keepdim, correction = test_data()
     pipeline = VgfPipeline[input_t1](
@@ -225,6 +227,7 @@ def test_var_dim_vgf_FP_no_dim(test_data: Tuple):
 
 @common.parametrize("test_data", Var.test_parameters)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_var_dim_vgf_INT_no_dim(test_data: Tuple):
     data, keepdim, correction = test_data()
     pipeline = VgfPipeline[input_t1](
@@ -297,6 +300,7 @@ def test_var_dim_u85_INT(test_data: Tuple):
 
 @common.parametrize("test_data", VarDim.test_parameters)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_var_dim_vgf_FP(test_data: Tuple):
     data, dim, keepdim, unbiased = test_data()
     pipeline = VgfPipeline[input_t1](
@@ -307,6 +311,7 @@ def test_var_dim_vgf_FP(test_data: Tuple):
 
 @common.parametrize("test_data", VarDim.test_parameters)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_var_dim_vgf_INT(test_data: Tuple):
     data, dim, keepdim, unbiased = test_data()
     pipeline = VgfPipeline[input_t1](
@@ -378,6 +383,7 @@ def test_var_dim_u85_INT_correction(test_data: Tuple):
 
 @common.parametrize("test_data", VarCorrection.test_parameters)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_var_dim_vgf_FP_correction(test_data: Tuple):
     data, dim, keepdim, corr = test_data()
     pipeline = VgfPipeline[input_t1](
@@ -388,6 +394,7 @@ def test_var_dim_vgf_FP_correction(test_data: Tuple):
 
 @common.parametrize("test_data", VarCorrection.test_parameters)
 @common.SkipIfNoModelConverter
+@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
 def test_var_dim_vgf_INT_correction(test_data: Tuple):
     data, dim, keepdim, corr = test_data()
     pipeline = VgfPipeline[input_t1](
diff --git a/backends/arm/test/tester/test_pipeline.py b/backends/arm/test/tester/test_pipeline.py
index b0446f948c0..54a8f08ee50 100644
--- a/backends/arm/test/tester/test_pipeline.py
+++ b/backends/arm/test/tester/test_pipeline.py
@@ -922,7 +922,7 @@ def __init__(
         test_data: T,
         aten_op: str | List[str],
         exir_op: Optional[str | List[str]] = None,
-        run_on_vulkan_runtime: bool = False,
+        run_on_vulkan_runtime: bool = True,
         vgf_compiler_flags: Optional[str] = "",
         tosa_version: str = "TOSA-1.0+FP",
         symmetric_io_quantization: bool = False,
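
Note (not part of the patch): a minimal sketch of the test pattern this change standardizes, assuming the VgfPipeline signature shown in the last hunk. HypotheticalOp, the test data, and the empty aten_op/exir_op lists are made up for illustration; real tests pass op-specific values. With run_on_vulkan_runtime now defaulting to True, a VGF test either runs on the Vulkan runtime as-is, passes run_on_vulkan_runtime=False to opt out (as the model tests above do), or is marked xfail against a tracked issue such as MLETORCH-1410.

from typing import Tuple

import pytest
import torch

from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import VgfPipeline

input_t = Tuple[torch.Tensor]  # single positional tensor input


class HypotheticalOp(torch.nn.Module):
    """Hypothetical stand-in module; replace with the operator under test."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x.relu()


test_data_suite = {"rand_4d": lambda: (torch.rand(1, 3, 8, 8),)}


@common.parametrize("test_data", test_data_suite)
@common.SkipIfNoModelConverter
@pytest.mark.xfail(reason="MLETORCH-1410: Tensor dimension count not supported: 0")
def test_hypothetical_vgf_FP(test_data: input_t):
    pipeline = VgfPipeline[input_t](
        HypotheticalOp(),
        test_data(),
        aten_op=[],  # illustrative only; real tests check op-specific strings
        exir_op=[],
        tosa_version="TOSA-1.0+FP",
        # Defaults to True after this change; pass False to build and check
        # the VGF artifact without executing it on the Vulkan runtime.
        run_on_vulkan_runtime=False,
    )
    pipeline.run()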