diff --git a/onnxruntime/test/contrib_ops/quantize_ops_test.cc b/onnxruntime/test/contrib_ops/quantize_ops_test.cc
index de10f14ef4538..db685967ae5ff 100644
--- a/onnxruntime/test/contrib_ops/quantize_ops_test.cc
+++ b/onnxruntime/test/contrib_ops/quantize_ops_test.cc
@@ -287,7 +287,6 @@ TEST(QuantizeLinearContribOpTest, QuantizeLinear_per_tensor_float_int8) {
                              127, -127, 127,
                              -128, 127, -128});
-  test.SetOutputAbsErr("y", 1.0f);
   // Disable Tensorrt EP due to error: node1_quantize_scale_node: out of bounds channel axis 1. Number of input dimensions is 1.
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
 }
@@ -312,7 +311,6 @@ TEST(QuantizeLinearContribOpTest, QuantizeLinear_per_tensor_float_uint16) {
                               32769, 32765,
                               65535, 0,
                               65535, 0});
-  test.SetOutputAbsErr("y", 1.0f);
 
   // Disable Tensorrt EP due to error: unsupported data type
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
diff --git a/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc b/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc
index ed67b531ef394..289e94397fb39 100644
--- a/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc
@@ -853,9 +853,6 @@ TEST(CastOpTest, Int32ToInt4x2OddNumberOfElements) {
 }
 
 TEST(CastOpTest, Int32ToInt4x2EmptyTensor) {
-  if (DefaultOpenVINOExecutionProvider().get() != nullptr) {
-    GTEST_SKIP() << "The OpenVINO not support 0 size input";
-  }
   // GIVEN
   const std::vector<int64_t> empty_shape{0};
   const std::vector<int32_t> empty_input = {};
diff --git a/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc b/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc
index c0325c07bab5e..bd8aad5f85514 100644
--- a/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc
@@ -448,7 +448,6 @@ TEST(QuantizeLinearOpTest, Uint16) {
                             32769, 32765,
                             65535, 0,
                             65535, 0});
-  test.SetOutputAbsErr("y", 1.0f);
 
   // Disable Tensorrt EP due to error: unsupported data type
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
@@ -478,7 +477,6 @@ TEST(QuantizeLinearOpTest, Int16) {
                            32767, -32768,
                            32767, -32768,
                            32767, -32768});
-  test.SetOutputAbsErr("y", 1.0f);
 
   // Disable Tensorrt EP due to error: unsupported data type
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
@@ -503,7 +501,6 @@ TEST(QuantizeLinearOpTest, Int4) {
   test.AddOutput<Int4x2>("y", dims,
                          {Int4x2(-8, -7), Int4x2(-1, 1), Int4x2(2, 7), Int4x2(7, unused_val)});
-  test.SetOutputAbsErr("y", 1.0f);
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
 }
@@ -571,7 +568,6 @@ TEST(QuantizeLinearOpTest, OddLarge_Int4) {
   test.AddInput<float>("scale", {}, {scale}, true);
   test.AddInput<Int4x2>("zero_point", {}, {Int4x2(zp, unused_val)}, true);
   test.AddOutput<Int4x2>("y", dims, output);
-  test.SetOutputAbsErr("y", 1.0f);
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
 }
@@ -598,7 +594,6 @@ TEST(QuantizeLinearOpTest, OddLarge_UInt4) {
   test.AddInput<float>("scale", {}, {scale}, true);
   test.AddInput<UInt4x2>("zero_point", {}, {UInt4x2(zp, unused_val)}, true);
   test.AddOutput<UInt4x2>("y", dims, output);
-  test.SetOutputAbsErr("y", 1.0f);
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
 }
@@ -616,7 +611,6 @@ TEST(QuantizeLinearOpTest, Int8_NegativeZeroPoint) {
   test.AddInput<float>("y_scale", {}, {.039215686f});
   test.AddInput<int8_t>("y_zero_point", {}, {-23});
   test.AddOutput<int8_t>("y", dims, {-23, 28, 53, 104, 127, -74, -128, -128});
-  test.SetOutputAbsErr("y", 1.0f);
   // Disable Tensorrt EP due to the error, node1_quantize_scale_node: out of bounds channel axis 1. Number of input dimensions is 1.
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
 }
@@ -634,7 +628,6 @@ TEST(QuantizeLinearOpTest, Int8_PositiveZeroPoint) {
   test.AddInput<float>("y_scale", {}, {.039215686f});
   test.AddInput<int8_t>("y_zero_point", {}, {23});
   test.AddOutput<int8_t>("y", dims, {23, 74, 99, 127, 127, -28, -104, -128});
-  test.SetOutputAbsErr("y", 1.0f);
   // Disable Tensorrt EP due to error:node1_quantize_scale_node: out of bounds channel axis 1. Number of input dimensions is 1.
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
 }
diff --git a/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc b/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc
index c47bdd31f8458..be3516437b1aa 100644
--- a/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc
@@ -304,7 +304,6 @@ TEST(ResizeOpTest, NhwcResizeOpLinearDownSampleTest_4DBilinear_uint8) {
   std::vector<uint8_t> Y = {2, 4};
 
   test.AddOutput<uint8_t>("Y", {N, static_cast<int64_t>(H * scales[1]), static_cast<int64_t>(W * scales[2]), C}, Y);
-  test.SetOutputAbsErr("Y", 1.0f);
   // CUDA: result mismatch due to not implementing NHWC support
   test.Run(OpTester::ExpectResult::kExpectSuccess, "",
            {kCudaExecutionProvider, kCudaNHWCExecutionProvider});
@@ -642,7 +641,6 @@ TEST(ResizeOpTest, NhwcResizeOpLinearDownSampleTest_4DBilinear_pytorch_half_pixe
   std::vector<uint8_t> Y = {1, 7, 12};
 
   test.AddOutput<uint8_t>("Y", {N, sizes[1], sizes[2], C}, Y);
-  test.SetOutputAbsErr("Y", 1.0f);
   // CUDA: result mismatch due to not implementing NHWC support
   // DML: results mismatch
   test.Run(OpTester::ExpectResult::kExpectSuccess, "",
diff --git a/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc b/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
index 657f3fe9c127a..5b2865a3feed7 100644
--- a/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
@@ -540,10 +540,6 @@ TEST(SliceTest, Slice1D_ReverseAllAxes_1) {
     GTEST_SKIP() << "Skipping because of the following error: Expected output shape [{4}] did not match run output shape [{0}] for output";
   }
 
-  if (DefaultOpenVINOExecutionProvider().get() != nullptr) {
-    GTEST_SKIP() << "Skipping because of the following error: The input ends do not support int max when step is negative.";
-  }
-
   RunSliceTest<float>({4},
                       {1.0f, 2.0f, 3.0f, 4.0f},
                       {-1},
diff --git a/onnxruntime/test/unittest_util/checkers.cc b/onnxruntime/test/unittest_util/checkers.cc
index d4b30cd11f1a0..7b2a5a4a4ff2f 100644
--- a/onnxruntime/test/unittest_util/checkers.cc
+++ b/onnxruntime/test/unittest_util/checkers.cc
@@ -225,27 +225,17 @@
 template <>
 struct TensorCheck<Int4x2> {
   void operator()(const Tensor& expected, const Tensor& actual, const ValidateOutputParams& params,
                   const std::string& /*provider_type*/) const {
-    const bool has_abs_err = params.absolute_error.has_value();
-    Tensor expected_sorted, actual_sorted;
+    ORT_UNUSED_PARAMETER(params);
     const Int4x2* cur_expected;
     const Int4x2* cur_actual;
     const auto size = narrow<size_t>(actual.Shape().Size());
     cur_expected = expected.Data<Int4x2>();
     cur_actual = actual.Data<Int4x2>();
-    double threshold = 0.0f;
-    if (has_abs_err) {
-      threshold = *(params.absolute_error);
-    }
     for (size_t i = 0; i < size; ++i) {
       size_t r = i >> 1;
       size_t c = i & 0x1;
-      // TODO: the relative error is not used for int4 yet.
-      if (has_abs_err) {
-        EXPECT_NEAR(cur_expected[r].GetElem(c), cur_actual[r].GetElem(c), threshold) << "i:" << i;
-      } else {
-        EXPECT_EQ(cur_expected[r].GetElem(c), cur_actual[r].GetElem(c)) << "i:" << i;
-      }
+      EXPECT_EQ(cur_expected[r].GetElem(c), cur_actual[r].GetElem(c)) << "i:" << i;
     }
   }
 };
@@ -254,28 +244,17 @@
 template <>
 struct TensorCheck<UInt4x2> {
   void operator()(const Tensor& expected, const Tensor& actual, const ValidateOutputParams& params,
                   const std::string& /*provider_type*/) const {
-    const bool has_abs_err = params.absolute_error.has_value();
-    Tensor expected_sorted, actual_sorted;
+    ORT_UNUSED_PARAMETER(params);
     const UInt4x2* cur_expected;
     const UInt4x2* cur_actual;
     const auto size = narrow<size_t>(actual.Shape().Size());
     cur_expected = expected.Data<UInt4x2>();
     cur_actual = actual.Data<UInt4x2>();
-    double threshold = 0.0f;
-    if (has_abs_err) {
-      threshold = *(params.absolute_error);
-    }
-
-    for (size_t i = 0; i < static_cast<size_t>(size); ++i) {
+    for (size_t i = 0; i < size; ++i) {
       size_t r = i >> 1;
       size_t c = i & 0x1;
-      // TODO: the relative error is not used for int4 yet.
-      if (has_abs_err) {
-        EXPECT_NEAR(cur_expected[r].GetElem(c), cur_actual[r].GetElem(c), threshold) << "i:" << i;
-      } else {
-        EXPECT_EQ(cur_expected[r].GetElem(c), cur_actual[r].GetElem(c)) << "i:" << i;
-      }
+      EXPECT_EQ(cur_expected[r].GetElem(c), cur_actual[r].GetElem(c)) << "i:" << i;
     }
   }
 };
@@ -313,7 +292,7 @@ struct TensorCheck<uint8_t> {
     // For any other EPs, we still expect an exact match for the results
     // TODO: Verify if DML can possibly have a ROUNDING_MODE parameter and conform to the other EPs #41968513
     if ((provider_type == kNnapiExecutionProvider || provider_type == kDmlExecutionProvider ||
-         provider_type == kXnnpackExecutionProvider || provider_type == kOpenVINOExecutionProvider) &&
+         provider_type == kXnnpackExecutionProvider) &&
         (has_abs_err || has_rel_err)) {
       double threshold = has_abs_err ? *(params.absolute_error)
                                      : 0.0;
@@ -378,49 +357,6 @@ struct TensorCheck {
-template <>
-struct TensorCheck<uint16_t> {
-  void operator()(const Tensor& expected,
-                  const Tensor& actual,
-                  const ValidateOutputParams& params,
-                  const std::string&) const {
-    const bool has_abs_err = params.absolute_error.has_value();
-    const bool has_rel_err = params.relative_error.has_value();
-
-    Tensor expected_sorted, actual_sorted;
-    const uint16_t* cur_expected;
-    const uint16_t* cur_actual;
-    const auto size = actual.Shape().Size();
-    if (params.sort_output) {
-      sort_expected_and_actual_buffers<uint16_t>(expected, expected_sorted, actual, actual_sorted);
-      cur_expected = expected_sorted.Data<uint16_t>();
-      cur_actual = actual_sorted.Data<uint16_t>();
-    } else {
-      cur_expected = expected.Data<uint16_t>();
-      cur_actual = actual.Data<uint16_t>();
-    }
-
-    if (has_abs_err || has_rel_err) {
-      double threshold = has_abs_err ? *(params.absolute_error)
-                                     : 0.0;
-
-      for (int64_t i = 0; i < size; ++i) {
-        if (has_rel_err) {
-          EXPECT_NEAR(cur_expected[i], cur_actual[i],
-                      *(params.relative_error) * cur_expected[i])  // expected[i] is unsigned, can't be negative
-              << "i:" << i;
-        } else {  // has_abs_err
-          EXPECT_NEAR(cur_expected[i], cur_actual[i], threshold) << "i:" << i;
-        }
-      }
-    } else {
-      for (int64_t i = 0; i < size; ++i) {
-        EXPECT_EQ(cur_expected[i], cur_actual[i]) << "i:" << i;
-      }
-    }
-  }
-};
-
 template <>
 struct TensorCheck {
   void operator()(const Tensor& expected,