diff --git a/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc b/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
index 373b2121a9b60..e46d258abaeff 100644
--- a/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
+++ b/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
@@ -35,41 +35,23 @@ namespace openvino_ep {
 
 // Ops which are supported only in models(as intermediate nodes) and not in unit tests
 std::set<std::string> ops_supported_only_in_model = {
-    "Add",
     "Cast",
     "Celu",
-    "Concat",
     "ConstantOfShape",
-    "DequantizeLinear",
     "Dropout",
     "Einsum",
-    "Exp",
-    "Expand",
-    "EyeLike",
     "GatherElements",
     "GatherND",
     "GridSample",
-    "Identity",
     "LayerNormalization",
-    "Loop",
     "LSTM",
-    "NonMaxSuppression",
-    "NonZero",
-    "Not",
     "OneHot",
     "Pad",
-    "QuantizeLinear",
     "RandomNormalLike",
-    "Range",
     "ReduceMin",
-    "Resize",
-    "Round",
-    "Shape",
     "Slice",
-    "Split",
-    "Tile",
-    "TopK",
-    "Trilu"};
+    "TopK"
+    };
 
 // Ops which are supported as functions (as composite ops)
 std::set<std::string> ops_supported_as_function = {
@@ -269,6 +251,8 @@ void DataOps::populate_types_supported() {
       std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT));
   supported_types_initializer_.insert(
       std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32));
+  supported_types_initializer_.insert(
+      std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32));
   supported_types_initializer_.insert(
       std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64));
   supported_types_initializer_.insert(
@@ -317,6 +301,8 @@ void DataOps::populate_types_supported() {
       std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL));
   supported_types_cpu_.insert(
       std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT));
+  supported_types_cpu_.insert(
+      std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32));
   supported_types_cpu_.insert(
       std::make_pair(V_2020_4, ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32));
   supported_types_cpu_.insert(
@@ -367,6 +353,7 @@ void DataOps::populate_op_mode_supported() {
   no_dimension_supported_.push_back({"DynamicQuantizeLinear", V_2025_2, {"All"}});
   no_dimension_supported_.push_back({"Equal", V_2022_1, {"CPU"}});
   no_dimension_supported_.push_back({"Equal", V_2023_0, {"GPU"}});
+  no_dimension_supported_.push_back({"Exp", V_2020_4, {"CPU", "GPU"}});
   no_dimension_supported_.push_back({"Expand", V_2023_3, {"CPU"}});
   no_dimension_supported_.push_back({"Expand", V_2024_3, {"CPU", "GPU"}});
   no_dimension_supported_.push_back({"Floor", V_2020_4, {"All"}});
@@ -382,6 +369,7 @@ void DataOps::populate_op_mode_supported() {
   no_dimension_supported_.push_back({"Mul", V_2020_4, {"All"}});
   no_dimension_supported_.push_back({"Neg", V_2023_0, {"CPU", "GPU"}});
   no_dimension_supported_.push_back({"Pow", V_2023_0, {"CPU", "GPU"}});
+  no_dimension_supported_.push_back({"PRelu", V_2020_4, {"CPU", "GPU"}});
   no_dimension_supported_.push_back({"QuantizeLinear", V_2021_4, {"All"}});
   no_dimension_supported_.push_back({"Range", V_2021_2, {"All"}});
   no_dimension_supported_.push_back({"ReduceMax", V_2021_4, {"All"}});
@@ -489,6 +477,38 @@ void DataOps::populate_op_mode_supported() {
                              }};
     op_list_.insert({"Upsample", obj});
   }
+  {
+    UnsupportedOpMode obj = {{V_2023_1, V_2023_2, V_2023_3, V_2024_0, V_2024_1, V_2024_2,
+                              V_2024_3, V_2024_4, V_2024_5, V_2024_6, V_2025_0, V_2025_1, V_2025_2,
+                              V_2025_3, V_2025_4},
+                             [this](const Node* node, const InitializedTensorSet&) {
+                               auto& attributes = node->GetAttributes();
+                               if (attributes.count("coordinate_transformation_mode") > 0) {
+                                 auto coordinate_transformation_mode =
+                                     attributes.at("coordinate_transformation_mode").s();
+                                 if (coordinate_transformation_mode == "tf_crop_and_resize" ||
+                                     coordinate_transformation_mode == "half_pixel_symmetric") {
+                                   return true;
+                                 }
+                               }
+                               if (attributes.count("antialias") > 0) {
+                                 auto antialias_mode =
+                                     attributes.at("antialias").i();
+                                 auto resize_mode = attributes.at("mode").s();
+                                 if (antialias_mode == 1 &&
+                                     (resize_mode == "linear" ||
+                                      resize_mode == "cubic")) {
+                                   return true;
+                                 }
+                               }
+                               if (attributes.count("exclude_outside") > 0) {
+                                 if (attributes.at("exclude_outside").i() == 1) {
+                                   return true;
+                                 }
+                               }
+                               return false;
+                             }};
+    op_list_.insert({"Resize", obj});
+  }
 }
 
 bool DataOps::op_is_supported(std::string name, std::vector<SupportedOp>& op_list) {
diff --git a/onnxruntime/test/contrib_ops/quantize_ops_test.cc b/onnxruntime/test/contrib_ops/quantize_ops_test.cc
index db685967ae5ff..5af8999206258 100644
--- a/onnxruntime/test/contrib_ops/quantize_ops_test.cc
+++ b/onnxruntime/test/contrib_ops/quantize_ops_test.cc
@@ -287,9 +287,46 @@ TEST(QuantizeLinearContribOpTest, QuantizeLinear_per_tensor_float_int8) {
                          127, -127,
                          127, -128,
                          127, -128});
+  std::unordered_set<std::string> excluded_providers;
   // Disable Tensorrt EP due to error: node1_quantize_scale_node: out of bounds channel axis 1. Number of input dimensions is 1.
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+  excluded_providers.insert(kTensorrtExecutionProvider);
+  // Disable OV EP due to different formulation for QuantizeLinear
+  excluded_providers.insert(kOpenVINOExecutionProvider);
+  test.ConfigExcludeEps(excluded_providers)
+      .RunWithConfig();
+}
+
+#ifdef USE_OPENVINO
+TEST(QuantizeLinearContribOpTest, OVEPQuantizeLinear_per_tensor_float_int8) {
+  OpTester test("QuantizeLinear", 1, onnxruntime::kMSDomain);
+  std::vector<int64_t> dims{16};
+  test.AddInput<float>("x", dims, {
+                                      0.f, 2.f,        //
+                                      3.f, -3.f,       // rounding half to even
+                                      2.9f, -2.9f,     // low case
+                                      3.1f, -3.1f,     // up case
+                                      254.f, -256.f,   // critical point
+                                      255.f, -257.f,   // critical point
+                                      256.f, -258.f,   // critical point
+                                      1000.f, -1000.f  // saturate case
+                                  });
+  test.AddInput<float>("y_scale", {}, {2.0f});
+  test.AddInput<int8_t>("y_zero_point", {}, {1});
+  test.AddOutput<int8_t>("y", dims,
+                         {1, 2,
+                          2, 0,
+                          2, 0,
+                          3, -1,
+                          127, -127,
+                          127, -128,
+                          127, -128,
+                          127, -128});
+  std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+  execution_providers.emplace_back(DefaultOpenVINOExecutionProvider());
+  test.ConfigEps(std::move(execution_providers))
+      .RunWithConfig();
 }
+#endif  // USE_OPENVINO
 
 // Test uint16 com.microsoft.QuantizeLinear (per tensor)
 TEST(QuantizeLinearContribOpTest, QuantizeLinear_per_tensor_float_uint16) {
@@ -311,10 +348,41 @@ TEST(QuantizeLinearContribOpTest, QuantizeLinear_per_tensor_float_uint16) {
                           32769, 32765,
                           65535, 0,
                           65535, 0});
-
+  std::unordered_set<std::string> excluded_providers;
   // Disable Tensorrt EP due to error: unsupported data type
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+  excluded_providers.insert(kTensorrtExecutionProvider);
+  // Disable OV EP due to different formulation for QuantizeLinear
+  excluded_providers.insert(kOpenVINOExecutionProvider);
+  test.ConfigExcludeEps(excluded_providers)
+      .RunWithConfig();
+}
+
+#ifdef USE_OPENVINO
+TEST(QuantizeLinearContribOpTest, OVEPQuantizeLinear_per_tensor_float_uint16) {
test("QuantizeLinear", 1, onnxruntime::kMSDomain); + std::vector dims{12}; + test.AddInput("x", dims, { + 0.f, -128.f, 3.f, -3.f, // rounding half to even + 2.9f, -2.9f, // round < .5 + 3.1f, -3.1f, // round > .5 + 65536.f, -65534.f, // critical point + 70000.f, -70000.f // saturate case + }); + test.AddInput("scale", {}, {2.0f}, true); + test.AddInput("zero_point", {}, {32767}, true); + test.AddOutput("y", dims, + {32767, 32703, + 32768, 32766, + 32768, 32766, + 32769, 32765, + 65535, 0, + 65535, 0}); + std::vector> execution_providers; + execution_providers.emplace_back(DefaultOpenVINOExecutionProvider()); + test.ConfigEps(std::move(execution_providers)) + .RunWithConfig(); } +#endif // USE_OPENVINO // Test int16 com.microsoft.QuantizeLinear (per tensor) TEST(QuantizeLinearContribOpTest, QuantizeLinear_per_tensor_float_int16) { diff --git a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc index 10affa538dfad..312c6872b13fa 100644 --- a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc +++ b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc @@ -1037,7 +1037,7 @@ TEST(Loop, IterationCountAsOutput) { test.AddOutput("loop_var_0_final", {3, 1}, {0, 1, 2}); // Disable TensorRT on unsupported data type BOOL - test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); + test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider}); } #if defined(USE_CUDA) diff --git a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc index 198fa07ae4ed0..4d153b67447fd 100644 --- a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc +++ b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc @@ -526,7 +526,7 @@ TEST(ConvTransposeTest, ConvTranspose_InvalidKernelShape) { // so drop the part that differs from the expected string "kernel_shape num_dims is not compatible with W num_dims. 
kernel_shape: {1,1,1,5} W: {1,1,", {kTensorrtExecutionProvider, kQnnExecutionProvider, - kDmlExecutionProvider}); // TODO: Unskip when fixed #41968513 + kDmlExecutionProvider, kOpenVINOExecutionProvider}); // TODO: Unskip when fixed #41968513 } TEST(ConvTransposeTest, ConvTranspose_onnx) { diff --git a/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc b/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc index 289e94397fb39..3cfce72bc48b3 100644 --- a/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc +++ b/onnxruntime/test/providers/cpu/tensor/cast_op_test.cc @@ -75,6 +75,11 @@ void TestCastOp(gsl::span input, excluded_provider_types.insert(kCudaExecutionProvider); } + if (input.size() == 0) { + // The OpenVINO doesn't support 0 size input + excluded_provider_types.insert(kOpenVINOExecutionProvider); + } + if (cuda_only && (excluded_provider_types.count(kCudaExecutionProvider) > 0)) { return; } diff --git a/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc b/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc index bd8aad5f85514..c8808cbcc76ce 100644 --- a/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc +++ b/onnxruntime/test/providers/cpu/tensor/quantize_linear_test.cc @@ -449,10 +449,43 @@ TEST(QuantizeLinearOpTest, Uint16) { 65535, 0, 65535, 0}); + std::unordered_set excluded_providers; // Disable Tensorrt EP due to error: unsupported data type - test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); + excluded_providers.insert(kTensorrtExecutionProvider); + // Disable OV EP due to different formulation for QuantizeLinear + excluded_providers.insert(kOpenVINOExecutionProvider); + test.ConfigExcludeEps(excluded_providers) + .RunWithConfig(); } +#ifdef USE_OPENVINO +TEST(QuantizeLinearOpTest, OVEP_Uint16) { + OpTester test("QuantizeLinear", 21); + std::vector dims{12}; + test.AddInput("x", dims, { + 0.f, -128.f, 3.f, -3.f, // rounding half to even + 2.9f, -2.9f, // round < .5 + 3.1f, -3.1f, // round > .5 + 65536.f, -65534.f, // critical point + 70000.f, -70000.f // saturate case + }); + test.AddInput("scale", {}, {2.0f}, true); + test.AddInput("zero_point", {}, {32767}, true); + test.AddOutput("y", dims, + {32767, 32703, + 32768, 32766, + 32768, 32766, + 32769, 32765, + 65535, 0, + 65535, 0}); + + std::vector> execution_providers; + execution_providers.emplace_back(DefaultOpenVINOExecutionProvider()); + test.ConfigEps(std::move(execution_providers)) + .RunWithConfig(); +} +#endif // USE_OPENVINO + // Test int16 QuantizeLinear (per tensor) TEST(QuantizeLinearOpTest, Int16) { OpTester test("QuantizeLinear", 21); @@ -502,8 +535,40 @@ TEST(QuantizeLinearOpTest, Int4) { {Int4x2(-8, -7), Int4x2(-1, 1), Int4x2(2, 7), Int4x2(7, unused_val)}); - test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); + std::unordered_set excluded_providers; + excluded_providers.insert(kTensorrtExecutionProvider); + // Disable OV EP due to different formulation for QuantizeLinear + excluded_providers.insert(kOpenVINOExecutionProvider); + test.ConfigExcludeEps(excluded_providers) + .RunWithConfig(); +} + +#ifdef USE_OPENVINO +TEST(QuantizeLinearOpTest, OVEP_Int4) { + OpTester test("QuantizeLinear", 21); + std::vector dims{7}; + constexpr int8_t unused_val = 0; + test.AddInput("x", dims, { + -20.0f, // Clamp to qmin + -16.0f, // Close to qmin + -3.0f, // round + 0.0f, // Zero-point + 2.9f, // round + 12.0f, // qmax + 20.0f, // Clamp to qmax + }); + test.AddInput("scale", {}, {2.0f}, true); + 
test.AddInput("zero_point", {}, {Int4x2(1, unused_val)}, true); + test.AddOutput("y", dims, + {Int4x2(-8, -7), Int4x2(0, 1), Int4x2(2, 7), + Int4x2(7, unused_val)}); + + std::vector> execution_providers; + execution_providers.emplace_back(DefaultOpenVINOExecutionProvider()); + test.ConfigEps(std::move(execution_providers)) + .RunWithConfig(); } +#endif // USE_OPENVINO // Test uint4 QuantizeLinear (per tensor) TEST(QuantizeLinearOpTest, UInt4) { @@ -569,7 +634,12 @@ TEST(QuantizeLinearOpTest, OddLarge_Int4) { test.AddInput("zero_point", {}, {Int4x2(zp, unused_val)}, true); test.AddOutput("y", dims, output); - test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); + std::unordered_set excluded_providers; + excluded_providers.insert(kTensorrtExecutionProvider); + // Disable OV EP due to different formulation for QuantizeLinear + excluded_providers.insert(kOpenVINOExecutionProvider); + test.ConfigExcludeEps(excluded_providers) + .RunWithConfig(); } // Test uint4 QuantizeLinear (per tensor) with a "large" and odd number of input elements. @@ -595,7 +665,12 @@ TEST(QuantizeLinearOpTest, OddLarge_UInt4) { test.AddInput("zero_point", {}, {UInt4x2(zp, unused_val)}, true); test.AddOutput("y", dims, output); - test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); + std::unordered_set excluded_providers; + excluded_providers.insert(kTensorrtExecutionProvider); + // Disable OV EP due to different formulation for QuantizeLinear + excluded_providers.insert(kOpenVINOExecutionProvider); + test.ConfigExcludeEps(excluded_providers) + .RunWithConfig(); } // quantize with scalar zero point and scale @@ -611,10 +686,31 @@ TEST(QuantizeLinearOpTest, Int8_NegativeZeroPoint) { test.AddInput("y_scale", {}, {.039215686f}); test.AddInput("y_zero_point", {}, {-23}); test.AddOutput("y", dims, {-23, 28, 53, 104, 127, -74, -128, -128}); + std::unordered_set excluded_providers; // Disable Tensorrt EP due to the error, node1_quantize_scale_node: out of bounds channel axis 1. Number of input dimensions is 1. - test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); + excluded_providers.insert(kTensorrtExecutionProvider); + // Disable OV EP due to different formulation for QuantizeLinear + excluded_providers.insert(kOpenVINOExecutionProvider); + test.ConfigExcludeEps(excluded_providers) + .RunWithConfig(); } +#ifdef USE_OPENVINO +TEST(QuantizeLinearOpTest, OVEP_Int8_NegativeZeroPoint) { + + OpTester test("QuantizeLinear", 10); + std::vector dims{8}; + test.AddInput("x", dims, {0, 2, 3, 5, 6, -2, -5, -6}); + test.AddInput("y_scale", {}, {.039215686f}); + test.AddInput("y_zero_point", {}, {-23}); + test.AddOutput("y", dims, {-23, 28, 54, 105, 127, -74, -128, -128}); + std::vector> execution_providers; + execution_providers.emplace_back(DefaultOpenVINOExecutionProvider()); + test.ConfigEps(std::move(execution_providers)) + .RunWithConfig(); +} +#endif // USE_OPENVINO + // quantize with scalar zero point and scale TEST(QuantizeLinearOpTest, Int8_PositiveZeroPoint) { // TODO: Unskip when fixed #41968513 @@ -628,9 +724,34 @@ TEST(QuantizeLinearOpTest, Int8_PositiveZeroPoint) { test.AddInput("y_scale", {}, {.039215686f}); test.AddInput("y_zero_point", {}, {23}); test.AddOutput("y", dims, {23, 74, 99, 127, 127, -28, -104, -128}); + std::unordered_set excluded_providers; // Disable Tensorrt EP due to error:node1_quantize_scale_node: out of bounds channel axis 1. Number of input dimensions is 1. 
- test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider}); + excluded_providers.insert(kTensorrtExecutionProvider); + // Disable OV EP due to different formulation for QuantizeLinear + excluded_providers.insert(kOpenVINOExecutionProvider); + test.ConfigExcludeEps(excluded_providers) + .RunWithConfig(); +} + +#ifdef USE_OPENVINO +TEST(QuantizeLinearOpTest, OVEP_Int8_PositiveZeroPoint) { + // TODO: Unskip when fixed #41968513 + if (DefaultDmlExecutionProvider().get() != nullptr) { + GTEST_SKIP() << "Skipping because of the following error: Expected equality of these values: -104 and -105"; + } + + OpTester test("QuantizeLinear", 10); + std::vector dims{8}; + test.AddInput("x", dims, {0, 2, 3, 5, 6, -2, -5, -6}); + test.AddInput("y_scale", {}, {.039215686f}); + test.AddInput("y_zero_point", {}, {23}); + test.AddOutput("y", dims, {23, 74, 100, 127, 127, -28, -104, -128}); + std::vector> execution_providers; + execution_providers.emplace_back(DefaultOpenVINOExecutionProvider()); + test.ConfigEps(std::move(execution_providers)) + .RunWithConfig(); } +#endif // USE_OPENVINO // quantize with 2D data TEST(QuantizeLinearOpTest, 2D) { diff --git a/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc b/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc index be3516437b1aa..0382cf2bc7879 100644 --- a/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc +++ b/onnxruntime/test/providers/cpu/tensor/resize_op_test.cc @@ -304,10 +304,43 @@ TEST(ResizeOpTest, NhwcResizeOpLinearDownSampleTest_4DBilinear_uint8) { std::vector Y = {2, 4}; test.AddOutput("Y", {N, static_cast(H * scales[1]), static_cast(W * scales[2]), C}, Y); + std::unordered_set excluded_providers; // CUDA: result mismatch due to not implementing NHWC support - test.Run(OpTester::ExpectResult::kExpectSuccess, "", - {kCudaExecutionProvider, kCudaNHWCExecutionProvider}); + // ROCm: results mismatch + excluded_providers.insert(kCudaExecutionProvider); + excluded_providers.insert(kCudaNHWCExecutionProvider); + // Disable OV EP due to round when converting from float to uint8 + excluded_providers.insert(kOpenVINOExecutionProvider); + test.ConfigExcludeEps(excluded_providers) + .RunWithConfig(); +} + +#ifdef USE_OPENVINO +TEST(ResizeOpTest, OVEPNhwcResizeOpLinearDownSampleTest_4DBilinear_uint8) { + OpTester test("Resize", 13); + std::vector roi{}; + std::vector scales{1.0f, 0.6f, 0.6f, 1.0f}; + + test.AddAttribute("mode", "linear"); + + constexpr int64_t N = 1, H = 2, W = 4, C = 1; + std::vector X = { + 1, 2, 3, 4, + 5, 6, 7, 8}; + + test.AddInput("X", {N, H, W, C}, X); + test.AddInput("roi", {0}, roi); + test.AddInput("scales", {4}, scales); + + std::vector Y = {3, 4}; + + test.AddOutput("Y", {N, static_cast(H * scales[1]), static_cast(W * scales[2]), C}, Y); + std::vector> execution_providers; + execution_providers.emplace_back(DefaultOpenVINOExecutionProvider()); + test.ConfigEps(std::move(execution_providers)) + .RunWithConfig(); } +#endif // USE_OPENVINO TEST(ResizeOpTest, NhwcResizeOpLinearDownSampleTest_4DBilinear_int8) { OpTester test("Resize", 13); @@ -641,11 +674,50 @@ TEST(ResizeOpTest, NhwcResizeOpLinearDownSampleTest_4DBilinear_pytorch_half_pixe std::vector Y = {1, 7, 12}; test.AddOutput("Y", {N, sizes[1], sizes[2], C}, Y); + std::unordered_set excluded_providers; // CUDA: result mismatch due to not implementing NHWC support // DML: results mismatch - test.Run(OpTester::ExpectResult::kExpectSuccess, "", - {kCudaExecutionProvider, kCudaNHWCExecutionProvider, kDmlExecutionProvider}); + 
+  excluded_providers.insert(kCudaExecutionProvider);
+  excluded_providers.insert(kCudaNHWCExecutionProvider);
+  excluded_providers.insert(kDmlExecutionProvider);
+  // Disable OV EP due to round when converting from float to uint8
+  excluded_providers.insert(kOpenVINOExecutionProvider);
+  test.ConfigExcludeEps(excluded_providers)
+      .RunWithConfig();
+}
+
+#ifdef USE_OPENVINO
+TEST(ResizeOpTest, OVEPNhwcResizeOpLinearDownSampleTest_4DBilinear_pytorch_half_pixel_uint8) {
+  OpTester test("Resize", 13);
+  std::vector<float> roi{};
+  std::vector<float> scales{};
+  std::vector<int64_t> sizes{1, 3, 1, 1};
+
+  test.AddAttribute("mode", "linear");
+  test.AddAttribute("coordinate_transformation_mode", "pytorch_half_pixel");
+
+  constexpr int64_t N = 1, H = 4, W = 4, C = 1;
+
+  std::vector<uint8_t> X = {
+      1, 2, 3, 4,
+      5, 6, 7, 8,
+      9, 10, 11, 12,
+      13, 14, 15, 16};
+
+  test.AddInput<uint8_t>("X", {N, H, W, C}, X);
+  test.AddInput<float>("roi", {0}, roi);
+  test.AddInput<float>("", {0}, scales);
+  test.AddInput<int64_t>("sizes", {4}, sizes);
+
+  std::vector<uint8_t> Y = {2, 7, 12};
+
+  test.AddOutput<uint8_t>("Y", {N, sizes[1], sizes[2], C}, Y);
+  std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+  execution_providers.emplace_back(DefaultOpenVINOExecutionProvider());
+  test.ConfigEps(std::move(execution_providers))
+      .RunWithConfig();
 }
+#endif  // USE_OPENVINO
 
 TEST(ResizeOpTest, NhwcResizeOpLinearDownSampleTest_4DBilinear_pytorch_half_pixel_int8) {
   OpTester test("Resize", 13);
@@ -755,12 +827,60 @@ TEST(ResizeOpTest, NhwcResizeOpLinearUpSampleTest_4DBilinear_asymmetric_uint8) {
                             Y, false, .0f, 1.0f);
     // CUDA: result mismatch due to not implementing NHWC support
     test.Run(OpTester::ExpectResult::kExpectSuccess, "",
-             {kCudaExecutionProvider, kCudaNHWCExecutionProvider});
+             {kCudaExecutionProvider, kCudaNHWCExecutionProvider, kOpenVINOExecutionProvider});
+  };
+
+  run_test(false);
+  run_test(true);
+}
+
+#ifdef USE_OPENVINO
+TEST(ResizeOpTest, OVEPNhwcResizeOpLinearUpSampleTest_4DBilinear_asymmetric_uint8) {
+  // To test NNAPI EP, we need the scales/sizes to be in initializers
+  auto run_test = [](bool scales_in_initializer) {
+    OpTester test("Resize", 13);
+    std::vector<float> roi{};
+    std::vector<float> scales{1.0f, 2.0f, 4.0f, 1.0f};
+
+    test.AddAttribute("mode", "linear");
+    test.AddAttribute("coordinate_transformation_mode", "asymmetric");
+
+    constexpr int64_t N = 2, H = 2, W = 2, C = 1;
+    std::vector<uint8_t> X = {1, 3,
+                              4, 8,
+
+                              6, 2,
+                              7, 11};
+
+    test.AddInput<uint8_t>("X", {N, H, W, C}, X);
+    test.AddInput<float>("roi", {0}, roi);
+    test.AddInput<float>("scales", {4}, scales, scales_in_initializer);
+
+    std::vector<uint8_t> Y = {
+        1, 2, 2, 2, 3, 3, 3, 3,
+        2, 3, 4, 5, 6, 6, 6, 6,
+        4, 5, 6, 7, 8, 8, 8, 8,
+        4, 5, 6, 7, 8, 8, 8, 8,
+
+        6, 5, 4, 3, 2, 2, 2, 2,
+        6, 6, 6, 6, 6, 6, 6, 6,
+        7, 8, 9, 10, 11, 11, 11, 11,
+        7, 8, 9, 10, 11, 11, 11, 11};
+
+    // Because the Xnnpack EP has different rounding behavior, we need to allow a tolerance of 1.
+    // The tolerance only applies to the Xnnpack EP.
+    test.AddOutput<uint8_t>("Y", {N, static_cast<int64_t>(H * scales[1]), static_cast<int64_t>(W * scales[2]), C},
+                            Y, false, .0f, 1.0f);
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    execution_providers.emplace_back(DefaultOpenVINOExecutionProvider());
+    test.ConfigEps(std::move(execution_providers))
+        .RunWithConfig();
   };
 
   run_test(false);
   run_test(true);
 }
+#endif  // USE_OPENVINO
 
 TEST(ResizeOpTest, NhwcResizeOpLinearUpSampleTest_4DBilinear_asymmetric_int8) {
   // To test NNAPI EP, we need the scales/sizes to be in initializers
@@ -2477,7 +2597,7 @@ TEST(ResizeOpTest, NoAntialias_AlignCorners_Cubic_Floor_NHWC) {
       23.0000f, 24.0000f,
   };
   // clang-format on
-  InlinedVector<std::string> excluded_eps = {kCudaExecutionProvider};
+  InlinedVector<std::string> excluded_eps = {kCudaExecutionProvider, kOpenVINOExecutionProvider};
   TestAntialiasing(
       {{"antialias", "0"},
        {"coordinate_transformation_mode", "align_corners"},
diff --git a/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc b/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
index 5b2865a3feed7..38bc326943c6f 100644
--- a/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/slice_op.test.cc
@@ -54,6 +54,7 @@ void RunSliceTest(const std::vector<int64_t>& input_dims,
 
   if (onnx_shape_disagreement) {
     excluded_providers.insert(kCoreMLExecutionProvider);
+    excluded_providers.insert(kOpenVINOExecutionProvider);
   }
 
   if (!v10_only) {