diff --git a/backends/arm/operators/op_repeat.py b/backends/arm/operators/op_repeat.py
index 5db7ce9347c..9ee4e9fedf8 100644
--- a/backends/arm/operators/op_repeat.py
+++ b/backends/arm/operators/op_repeat.py
@@ -44,7 +44,7 @@ def define_node(
         validate_valid_dtype(
             self.target,
             [inputs[0], output],
-            [ts.DType.INT8, ts.DType.INT32, ts.DType.FP32],
+            [ts.DType.INT8, ts.DType.INT32, ts.DType.INT16, ts.DType.FP32],
             output.tosa_spec,
         )

diff --git a/backends/arm/test/ops/test_mul.py b/backends/arm/test/ops/test_mul.py
index e3f2096e7da..2c7b040658a 100644
--- a/backends/arm/test/ops/test_mul.py
+++ b/backends/arm/test/ops/test_mul.py
@@ -338,9 +338,6 @@ def test_mul_tensor_16a8w_tosa_INT(test_data: input_t1):

 @common.parametrize("test_data", test_data_suite)
 @common.XfailIfNoCorstone300
-@pytest.mark.xfail(
-    reason="Vela compilation fails with 'Invalid arguments' for int16 mul operations. See: https://github.com/pytorch/executorch/issues/13947"
-)
 def test_mul_tensor_16a8w_u55_INT16(test_data: input_t1):
     """Test mul operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""
     per_channel_quantization = False
@@ -365,9 +362,6 @@ def test_mul_tensor_16a8w_u55_INT16(test_data: input_t1):

 @common.parametrize("test_data", test_data_suite)
 @common.XfailIfNoCorstone320
-@pytest.mark.xfail(
-    reason="Vela compilation fails with 'Invalid arguments' for int16 mul operations. See: https://github.com/pytorch/executorch/issues/13947"
-)
 def test_mul_tensor_16a8w_u85_INT16(test_data: input_t1):
     """Test mul operation with 16A8W quantization on U85 (16-bit activations, 8-bit weights)"""
     per_channel_quantization = False
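For context, the first hunk widens an allow-list dtype gate: lowering of the repeat op succeeds only if the tensors' dtypes appear in the listed TOSA dtypes, and adding `ts.DType.INT16` lets 16-bit activations through. The sketch below is a minimal, self-contained illustration of that allow-list behavior; the `DType` enum, `_Tensor` class, and simplified `validate_valid_dtype` signature are stand-ins for illustration, not the actual ExecuTorch Arm backend implementation.

```python
from enum import Enum, auto


class DType(Enum):
    """Hypothetical stand-in for ts.DType; only the members used here."""
    INT8 = auto()
    INT16 = auto()
    INT32 = auto()
    FP32 = auto()


class _Tensor:
    """Hypothetical minimal tensor carrying only a dtype."""
    def __init__(self, dtype):
        self.dtype = dtype


def validate_valid_dtype(target, tensors, valid_dtypes, tosa_spec=None):
    """Sketch of an allow-list dtype check (assumed behavior): raise if any
    tensor's dtype is not in valid_dtypes."""
    for t in tensors:
        if t.dtype not in valid_dtypes:
            raise ValueError(
                f"{target}: dtype {t.dtype} not supported; expected one of {valid_dtypes}"
            )


# With INT16 included in the allow-list (as in the op_repeat.py hunk),
# 16-bit input/output tensors pass the check instead of raising.
validate_valid_dtype(
    "aten.repeat.default",
    [_Tensor(DType.INT16), _Tensor(DType.INT16)],
    [DType.INT8, DType.INT32, DType.INT16, DType.FP32],
)
```

The test_mul.py hunks complement this by dropping the `pytest.mark.xfail` markers, so the 16A8W mul tests on Corstone-300 (U55) and Corstone-320 (U85) are now expected to pass rather than fail during Vela compilation.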