From 6af0394cd26463ffbbb389fc2079f0e709a679cc Mon Sep 17 00:00:00 2001 From: alexlyulkov Date: Wed, 15 May 2024 11:51:59 +0300 Subject: [PATCH] Merge pull request #25458 from alexlyulkov:al/dnn-openvino-int-support Added int support for OpenVINO dnn backend #25458 Modified dnn OpenVINO integration to support type inference and int operations. Added OpenVINO support to Cast, CumSum, Expand, Gather, GatherElements, Scatter, ScatterND, Tile layers. I tried to add Reduce layer, but looks like OpenVINO uses float values inside Reduce operation so it can't pass our int tests. OpenVINO uses int32 precision for int64 operations, so I've modified input values for int64 tests when backend is OpenVINO. OpenVINO has a strange behavior with custom layers and int64 values. After model compilation OpenVINO may change types, so the model can have different output type. That's why these tests were disabled: - Test_ArgMax_Int.random/0, where GetParam() = (4, NGRAPH/CPU) - Test_ArgMax_Int.random/6, where GetParam() = (11, NGRAPH/CPU) - Test_Reduce_Int.random/6, where GetParam() = (11, NGRAPH/CPU) - Test_Reduce_Int.two_axes/6, where GetParam() = (11, NGRAPH/CPU) Also these tests were temporary disabled, they didn't work on both 4.x and 5.x branches: - Test_Caffe_layers.layer_prelu_fc/0, where GetParam() = NGRAPH/CPU - Test_ONNX_layers.LSTM_Activations/0, where GetParam() = NGRAPH/CPU - Test_ONNX_layers.Quantized_Convolution/0, where GetParam() = NGRAPH/CPU - Test_ONNX_layers.Quantized_Eltwise_Scalar/0, where GetParam() = NGRAPH/CPU - Test_TFLite.EfficientDet_int8/0, where GetParam() = NGRAPH/CPU ### Pull Request Readiness Checklist See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request - [x] I agree to contribute to the project under Apache 2 License. - [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV - [x] The PR is proposed to the proper branch - [ ] There is a reference to the original bug report and related work - [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable Patch to opencv_extra has the same branch name. - [x] The feature is well documented and sample code can be built with the project CMake --- .github/workflows/PR-5.x.yaml | 3 + modules/dnn/src/ie_ngraph.cpp | 71 +++++++++++++++---- modules/dnn/src/ie_ngraph.hpp | 4 ++ .../dnn/src/int8layers/convolution_layer.cpp | 9 +-- modules/dnn/src/layer_internals.hpp | 3 +- modules/dnn/src/layers/blank_layer.cpp | 3 +- modules/dnn/src/layers/cast_layer.cpp | 14 +++- modules/dnn/src/layers/const_layer.cpp | 15 +--- modules/dnn/src/layers/cumsum_layer.cpp | 38 ++++++++++ modules/dnn/src/layers/expand_layer.cpp | 27 ++++++- .../dnn/src/layers/gather_elements_layer.cpp | 30 +++++++- modules/dnn/src/layers/gather_layer.cpp | 18 ++++- modules/dnn/src/layers/padding_layer.cpp | 23 +++++- modules/dnn/src/layers/scatterND_layer.cpp | 17 ++++- modules/dnn/src/layers/scatter_layer.cpp | 34 ++++++++- modules/dnn/src/layers/tile_layer.cpp | 17 ++++- modules/dnn/src/net_impl.cpp | 5 +- modules/dnn/src/op_inf_engine.cpp | 3 + modules/dnn/test/test_int.cpp | 50 ++++++++++++- modules/dnn/test/test_layers.cpp | 3 + modules/dnn/test/test_onnx_importer.cpp | 34 ++++++++- modules/dnn/test/test_tflite_importer.cpp | 3 + 22 files changed, 374 insertions(+), 50 deletions(-) diff --git a/.github/workflows/PR-5.x.yaml b/.github/workflows/PR-5.x.yaml index 96511e765e3c..25cbd260f921 100644 --- a/.github/workflows/PR-5.x.yaml +++ b/.github/workflows/PR-5.x.yaml @@ -15,6 +15,9 @@ jobs: Ubuntu2004-x64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U20.yaml@main + Ubuntu2004-x64-OpenVINO: + uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U20-OpenVINO.yaml@main + Ubuntu2204-x64: uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U22.yaml@main diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp index 6e7b9f9be5aa..432cce4924fb 100644 --- a/modules/dnn/src/ie_ngraph.cpp +++ b/modules/dnn/src/ie_ngraph.cpp @@ -48,6 +48,48 @@ ngraphWrappers(const std::vector >& ptrs) return wrappers; } +ov::element::Type cvTypeToOvType(MatType cvType) +{ + switch (cvType) { + case CV_32F: + return ov::element::f32; + case CV_8U: + return ov::element::u8; + case CV_8S: + return ov::element::i8; + case CV_32S: + return ov::element::i32; + case CV_64S: + return ov::element::i64; + default: + CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(cvType).c_str())); + } +} + +ov::element::Type cvTypeToOvType(const cv::Mat& mat) +{ + return cvTypeToOvType(mat.depth()); +} + +MatType ovTypeToCvType(ov::element::Type ovType) +{ + switch (ovType) { + case ov::element::f32: + return CV_32F; + case ov::element::u8: + return CV_8U; + case ov::element::i8: + return CV_8S; + case ov::element::i32: + return CV_32S; + case ov::element::i64: + return CV_64S; + default: + CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", ovType.get_type_name().c_str())); + } +} + + class NgraphCustomOp: public ov::op::Op { public: OPENVINO_OP(kOpenCVLayersType); @@ -60,6 +102,19 @@ class NgraphCustomOp: public ov::op::Op { void validate_and_infer_types() override { + std::vector inputTypes(get_input_size()); + std::vector internalTypes; + std::vector outputTypes; + for (int i = 0; i < get_input_size(); ++i) + { + inputTypes[i] = ovTypeToCvType(get_input_element_type(i)); + } + cvLayer->getTypes(inputTypes, outputs.size(), internals.size(), outputTypes, internalTypes); + for (int i = 0; i < internals.size(); ++i) { + if (internals[i].depth() != internalTypes[i]) + internals[i] = cv::Mat(shape(internals[i]), internalTypes[i]); + } + set_output_size(outputs.size()); for (int i = 0; i < outputs.size(); ++i) { @@ -67,7 +122,7 @@ class NgraphCustomOp: public ov::op::Op { for (int j = 0; j < outputs[i].dims; ++j) { shape.push_back(outputs[i].size[j]); } - set_output_type(i, get_input_element_type(0), shape); + set_output_type(i, cvTypeToOvType(outputTypes[i]), shape); } } @@ -270,7 +325,7 @@ ov::ParameterVector InfEngineNgraphNet::setInputs(const std::vector& in for (size_t i = 0; i < inputs.size(); i++) { std::vector shape = getShape(inputs[i]); - auto inp = std::make_shared(ov::element::f32, ov::Shape(shape)); + auto inp = std::make_shared(cvTypeToOvType(inputs[i]), ov::Shape(shape)); inp->set_friendly_name(names[i]); auto it = std::find_if(inputs_vec.begin(), inputs_vec.end(), @@ -427,16 +482,7 @@ void NgraphBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays ov::Tensor wrapToNgraphBlob(const Mat& m) { std::vector shape = getShape(m); - if (m.type() == CV_32F) - return ov::Tensor(ov::element::f32, shape, m.data); - else if (m.type() == CV_8U) - return ov::Tensor(ov::element::u8, shape, m.data); - else if (m.type() == CV_8SC1) - return ov::Tensor(ov::element::i8, shape, m.data); - else if (m.type() == CV_32SC1) - return ov::Tensor(ov::element::i32, shape, m.data); - else - CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str())); + return ov::Tensor(cvTypeToOvType(m), shape, m.data); } @@ -445,6 +491,7 @@ NgraphBackendWrapper::NgraphBackendWrapper(int targetId, const cv::Mat& m) , host((Mat*)&m) { blob = wrapToNgraphBlob(m); + hostMatDepth = m.depth(); } NgraphBackendWrapper::NgraphBackendWrapper(Ptr wrapper) diff --git a/modules/dnn/src/ie_ngraph.hpp b/modules/dnn/src/ie_ngraph.hpp index 19e07d62ac32..df465f649191 100644 --- a/modules/dnn/src/ie_ngraph.hpp +++ b/modules/dnn/src/ie_ngraph.hpp @@ -29,6 +29,10 @@ namespace cv { namespace dnn { #ifdef HAVE_DNN_NGRAPH +ov::element::Type cvTypeToOvType(MatType cvType); +ov::element::Type cvTypeToOvType(const cv::Mat& mat); +MatType ovTypeToCvType(ov::element::Type ovType); + class InfEngineNgraphNode; class InfEngineNgraphNet diff --git a/modules/dnn/src/int8layers/convolution_layer.cpp b/modules/dnn/src/int8layers/convolution_layer.cpp index 25132542cde0..18c9205c22f6 100644 --- a/modules/dnn/src/int8layers/convolution_layer.cpp +++ b/modules/dnn/src/int8layers/convolution_layer.cpp @@ -570,6 +570,7 @@ class ConvolutionLayerInt8Impl CV_FINAL : public BaseConvolutionLayerInt8Impl CV_Assert(!blobs.empty()); CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1); CV_CheckTypeEQ(weightsMat.type(), CV_8S, ""); + CV_CheckTypeEQ(blobs[0].type(), CV_8S, ""); auto ieInpNode = nodes[0].dynamicCast()->node; std::vector dims = ieInpNode.get_shape(); CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, ""); @@ -581,7 +582,7 @@ class ConvolutionLayerInt8Impl CV_FINAL : public BaseConvolutionLayerInt8Impl const int inpGroupCn = nodes.size() > 1 ? ieWeights.get_shape()[1] : blobs[0].size[1]; const int group = inpCn / inpGroupCn; - std::vector kernel_shape; + std::vector kernel_shape; if (group != 1) { kernel_shape.push_back(group); @@ -592,7 +593,7 @@ class ConvolutionLayerInt8Impl CV_FINAL : public BaseConvolutionLayerInt8Impl if (nodes.size() == 1) { - ieWeights = std::make_shared(ov::element::i8, kernel_shape, blobs[0].data); + ieWeights = std::make_shared(ov::element::i8, ov::Shape(kernel_shape.begin(), kernel_shape.end()), blobs[0].data); } else { @@ -655,7 +656,7 @@ class ConvolutionLayerInt8Impl CV_FINAL : public BaseConvolutionLayerInt8Impl pad_type); } - std::vector shape(conv_node.get_shape().size(), 1); + std::vector shape(conv_node.get_shape().size(), 1); shape[1] = conv_node.get_shape()[1]; if (biasvec.size() || nodes.size() == 3) { @@ -672,7 +673,7 @@ class ConvolutionLayerInt8Impl CV_FINAL : public BaseConvolutionLayerInt8Impl for (int i = 0; i < numOutput; ++i) { ovBias[i] = (biasvec[i] + input_zp * cv::sum(blobs[0].row(i))[0]) * outputMultiplier[i] * output_sc; } - bias = std::make_shared(ov::element::f32, ov::Shape(shape), ovBias.data()); + bias = std::make_shared(ov::element::f32, ov::Shape(shape.begin(), shape.end()), ovBias.data()); } conv_node = std::make_shared(conv_node, bias, ov::op::AutoBroadcastType::NUMPY); } diff --git a/modules/dnn/src/layer_internals.hpp b/modules/dnn/src/layer_internals.hpp index eefb06a23090..be2163d59440 100644 --- a/modules/dnn/src/layer_internals.hpp +++ b/modules/dnn/src/layer_internals.hpp @@ -134,7 +134,8 @@ struct DataLayer : public Layer virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp index 7d4e63dfb406..bc6dacc2b6b1 100644 --- a/modules/dnn/src/layers/blank_layer.cpp +++ b/modules/dnn/src/layers/blank_layer.cpp @@ -161,8 +161,7 @@ class BlankLayerImpl CV_FINAL : public BlankLayer const std::vector >& nodes) CV_OVERRIDE { auto ieInpNode = nodes[0].dynamicCast()->node; - ov::OutputVector inp{ieInpNode}; - auto blank = std::make_shared(inp, 0); + auto blank = std::make_shared(ieInpNode, ieInpNode); return Ptr(new InfEngineNgraphNode(blank)); } #endif // HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/cast_layer.cpp b/modules/dnn/src/layers/cast_layer.cpp index 4c812bd7004e..07f6c2cbf4eb 100644 --- a/modules/dnn/src/layers/cast_layer.cpp +++ b/modules/dnn/src/layers/cast_layer.cpp @@ -3,6 +3,8 @@ // of this distribution and at http://opencv.org/license.html. #include "../precomp.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" #include "layers_common.hpp" @@ -19,7 +21,8 @@ class CastLayerImpl CV_FINAL : public CastLayer virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } virtual bool getMemoryShapes(const std::vector &inputs, @@ -83,6 +86,15 @@ class CastLayerImpl CV_FINAL : public CastLayer inputs[0].convertTo(outputs[0], outputs[0].depth()); } +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + auto cast = std::make_shared(nodes[0].dynamicCast()->node, cvTypeToOvType(outputType)); + return Ptr(new InfEngineNgraphNode(cast)); + } +#endif // HAVE_DNN_NGRAPH + private: int outputType; }; diff --git a/modules/dnn/src/layers/const_layer.cpp b/modules/dnn/src/layers/const_layer.cpp index 68f60e0576a6..84120c8e27fc 100644 --- a/modules/dnn/src/layers/const_layer.cpp +++ b/modules/dnn/src/layers/const_layer.cpp @@ -142,23 +142,10 @@ class ConstLayerImpl CV_FINAL : public ConstLayer virtual Ptr initNgraph(const std::vector >& inputs, const std::vector >& nodes) CV_OVERRIDE { - ov::element::Type dType; - if (blobs[0].depth() == CV_32F) { - dType = ov::element::f32; - } else if (blobs[0].depth() == CV_32S) { - dType = ov::element::i32; - } else if (blobs[0].depth() == CV_8S) { - dType = ov::element::i8; - } else { - CV_Error(Error::StsNotImplemented, format("Unexpected Const data depth: %d", blobs[0].depth())); - } std::shared_ptr node = - std::make_shared(dType, + std::make_shared(cvTypeToOvType(blobs[0]), getShape(blobs[0]), blobs[0].data); - if (node->get_element_type() != ov::element::f32) { - node = std::make_shared(node, ov::element::f32); - } return Ptr(new InfEngineNgraphNode(node)); } #endif // HAVE_DNN_NGRAPH diff --git a/modules/dnn/src/layers/cumsum_layer.cpp b/modules/dnn/src/layers/cumsum_layer.cpp index 4895cb8f1785..d7b028ac5f5e 100644 --- a/modules/dnn/src/layers/cumsum_layer.cpp +++ b/modules/dnn/src/layers/cumsum_layer.cpp @@ -3,6 +3,8 @@ // of this distribution and at http://opencv.org/license.html. #include "../precomp.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" #include "layers_common.hpp" #include @@ -23,6 +25,12 @@ class CumSumLayerImpl CV_FINAL : public CumSumLayer setParamsFrom(params); } + virtual bool supportBackend(int backendId) CV_OVERRIDE + { + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; + } + bool getMemoryShapes(const std::vector &inputs, const int requiredOutputs, std::vector &outputs, @@ -151,6 +159,36 @@ class CumSumLayerImpl CV_FINAL : public CumSumLayer } } +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + std::shared_ptr cumsum; + if (nodes.size() == 2) + { + int32_t axis_shape = 1; + auto axis_scalar = std::make_shared( + nodes[1].dynamicCast()->node, + std::make_shared(ov::element::i32, ov::Shape{}, &axis_shape), + false); + cumsum = std::make_shared( + nodes[0].dynamicCast()->node, + std::make_shared(axis_scalar, ov::element::i32), + exclusive_raw, + reverse_raw); + } + else + { + cumsum = std::make_shared( + nodes[0].dynamicCast()->node, + std::make_shared(ov::element::i32, ov::Shape{}, &axis_raw), + exclusive_raw, + reverse_raw); + } + return Ptr(new InfEngineNgraphNode(cumsum)); + } +#endif // HAVE_DNN_NGRAPH + int axis_raw; int exclusive_raw; int reverse_raw; diff --git a/modules/dnn/src/layers/expand_layer.cpp b/modules/dnn/src/layers/expand_layer.cpp index d233f5be624a..71671848f6dd 100644 --- a/modules/dnn/src/layers/expand_layer.cpp +++ b/modules/dnn/src/layers/expand_layer.cpp @@ -3,6 +3,8 @@ // of this distribution and at http://opencv.org/license.html. #include "../precomp.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" #include namespace cv { namespace dnn { @@ -27,8 +29,10 @@ class ExpandLayerImpl CV_FINAL : public ExpandLayer const_input_1d = params.get("const_input_1d", false); } - virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + virtual bool supportBackend(int backendId) CV_OVERRIDE + { + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } virtual bool getMemoryShapes(const std::vector &inputs, @@ -145,6 +149,25 @@ class ExpandLayerImpl CV_FINAL : public ExpandLayer } } +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + auto input_shape = nodes[0].dynamicCast()->node.get_shape(); + CV_CheckGE(target_shape.size(), input_shape.size(), ""); + + std::vector output_shape(target_shape.begin(), target_shape.end()); + for (int i = 1; i < input_shape.size() + 1; ++i) + output_shape[output_shape.size() - i] = std::max( + (int32_t)input_shape[input_shape.size() - i], + output_shape[output_shape.size() - i]); + + auto shape_node = std::make_shared(ov::element::i32, ov::Shape{output_shape.size()}, output_shape.data()); + auto expand = std::make_shared(nodes[0].dynamicCast()->node, shape_node); + return Ptr(new InfEngineNgraphNode(expand)); + } +#endif // HAVE_DNN_NGRAPH + private: MatShape target_shape; bool const_input_1d; diff --git a/modules/dnn/src/layers/gather_elements_layer.cpp b/modules/dnn/src/layers/gather_elements_layer.cpp index 4bc0f41cde37..c1ed5aa7a1a9 100644 --- a/modules/dnn/src/layers/gather_elements_layer.cpp +++ b/modules/dnn/src/layers/gather_elements_layer.cpp @@ -3,6 +3,8 @@ // of this distribution and at http://opencv.org/license.html. #include "../precomp.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" #include namespace cv { namespace dnn { @@ -30,7 +32,8 @@ class GatherElementsLayerImpl CV_FINAL : public GatherElementsLayer virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } virtual bool getMemoryShapes(const std::vector &inputs, @@ -176,6 +179,31 @@ class GatherElementsLayerImpl CV_FINAL : public GatherElementsLayer }; } +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + int64_t indicesBoundInt64 = nodes[0].dynamicCast()->node.get_shape()[axis]; + int32_t indicesBoundInt32 = indicesBoundInt64; + std::shared_ptr indicesBound; + if (nodes[1].dynamicCast()->node.get_element_type() == ov::element::i32) + indicesBound = std::make_shared(ov::element::i32, ov::Shape{}, &indicesBoundInt32); + else if (nodes[1].dynamicCast()->node.get_element_type() == ov::element::i64) + indicesBound = std::make_shared(ov::element::i64, ov::Shape{}, &indicesBoundInt64); + else + CV_Error(Error::StsNotImplemented, ""); + + auto indicesNonNegative = std::make_shared( + std::make_shared(nodes[1].dynamicCast()->node, indicesBound), + indicesBound); + auto gatherElements = std::make_shared( + nodes[0].dynamicCast()->node, + indicesNonNegative, + axis); + return Ptr(new InfEngineNgraphNode(gatherElements)); + } +#endif // HAVE_DNN_NGRAPH + private: int axis; }; diff --git a/modules/dnn/src/layers/gather_layer.cpp b/modules/dnn/src/layers/gather_layer.cpp index 3117b618e927..4e44069a2867 100644 --- a/modules/dnn/src/layers/gather_layer.cpp +++ b/modules/dnn/src/layers/gather_layer.cpp @@ -3,6 +3,8 @@ // of this distribution and at http://opencv.org/license.html. #include "../precomp.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" #include "layers_common.hpp" @@ -20,7 +22,8 @@ class GatherLayerImpl CV_FINAL : public GatherLayer virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } virtual bool getMemoryShapes(const std::vector &inputs, @@ -115,6 +118,19 @@ class GatherLayerImpl CV_FINAL : public GatherLayer } } +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + auto axisNode = std::make_shared(ov::element::i32, ov::Shape{}, &m_axis); + auto gather = std::make_shared( + nodes[0].dynamicCast()->node, + nodes[1].dynamicCast()->node, + axisNode); + return Ptr(new InfEngineNgraphNode(gather)); + } +#endif // HAVE_DNN_NGRAPH + private: // The axis to gather along int m_axis; diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp index ecc5b3519aa1..161af4ea9efe 100644 --- a/modules/dnn/src/layers/padding_layer.cpp +++ b/modules/dnn/src/layers/padding_layer.cpp @@ -271,8 +271,29 @@ class PaddingLayerImpl CV_FINAL : public PaddingLayer auto padding_below = std::make_shared(ov::element::i64, ov::Shape{begins.size()}, begins.data()); auto padding_above = std::make_shared(ov::element::i64, ov::Shape{ends.size()}, ends.data()); auto pad_mode = paddingType == "constant" ? ov::op::PadMode::CONSTANT : ov::op::PadMode::REFLECT; // SYMMETRIC + + std::shared_ptr arg_pad_value; float paddingValueFloat = paddingValue; - auto arg_pad_value = std::make_shared(ov::element::f32, ov::Shape{}, &paddingValueFloat); + int8_t paddingValueInt8 = paddingValue; + int32_t paddingValueInt32 = paddingValue; + int64_t paddingValueInt64 = paddingValue; + switch(ieInpNode.get_element_type()) + { + case ov::element::f32: + arg_pad_value = std::make_shared(ov::element::f32, ov::Shape{}, &paddingValueFloat); + break; + case ov::element::i8: + arg_pad_value = std::make_shared(ov::element::i8, ov::Shape{}, &paddingValueInt8); + break; + case ov::element::i32: + arg_pad_value = std::make_shared(ov::element::i32, ov::Shape{}, &paddingValueInt32); + break; + case ov::element::i64: + arg_pad_value = std::make_shared(ov::element::i64, ov::Shape{}, &paddingValueInt64); + break; + default: + CV_Error(Error::BadDepth, ""); + }; auto pad = paddingType == "constant" ? std::make_shared(ieInpNode, padding_below, padding_above, arg_pad_value, pad_mode) : diff --git a/modules/dnn/src/layers/scatterND_layer.cpp b/modules/dnn/src/layers/scatterND_layer.cpp index 41c1a85a014a..bcc3774e950f 100644 --- a/modules/dnn/src/layers/scatterND_layer.cpp +++ b/modules/dnn/src/layers/scatterND_layer.cpp @@ -3,6 +3,8 @@ // of this distribution and at http://opencv.org/license.html. #include "../precomp.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" #include "layers_common.hpp" #include // for std::max & std::min @@ -42,7 +44,8 @@ class ScatterNDLayerImpl CV_FINAL : public ScatterNDLayer virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + return backendId == DNN_BACKEND_OPENCV || + (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && reduction == REDUCTION::NONE); } virtual bool getMemoryShapes(const std::vector &inputs, @@ -240,6 +243,18 @@ class ScatterNDLayerImpl CV_FINAL : public ScatterNDLayer CV_Error(Error::StsBadArg, "Unsupported reduction."); }; } + +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + auto scatterND = std::make_shared( + nodes[0].dynamicCast()->node, + nodes[1].dynamicCast()->node, + nodes[2].dynamicCast()->node); + return Ptr(new InfEngineNgraphNode(scatterND)); + } +#endif // HAVE_DNN_NGRAPH }; Ptr ScatterNDLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/scatter_layer.cpp b/modules/dnn/src/layers/scatter_layer.cpp index 4f2d36e0a18a..c10bfd2c54e7 100644 --- a/modules/dnn/src/layers/scatter_layer.cpp +++ b/modules/dnn/src/layers/scatter_layer.cpp @@ -3,6 +3,8 @@ // of this distribution and at http://opencv.org/license.html. #include "../precomp.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" #include "layers_common.hpp" #include // for std::max & std::min @@ -43,7 +45,8 @@ class ScatterLayerImpl CV_FINAL : public ScatterLayer virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + return backendId == DNN_BACKEND_OPENCV || + (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && reduction == REDUCTION::NONE); } virtual bool getMemoryShapes(const std::vector &inputs, @@ -236,6 +239,35 @@ class ScatterLayerImpl CV_FINAL : public ScatterLayer }; } +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + int64_t indicesBoundInt64 = nodes[0].dynamicCast()->node.get_shape()[axis]; + int32_t indicesBoundInt32 = indicesBoundInt64; + std::shared_ptr indicesBound; + + if (nodes[1].dynamicCast()->node.get_element_type() == ov::element::i32) + indicesBound = std::make_shared(ov::element::i32, ov::Shape{}, &indicesBoundInt32); + else if (nodes[1].dynamicCast()->node.get_element_type() == ov::element::i64) + indicesBound = std::make_shared(ov::element::i64, ov::Shape{}, &indicesBoundInt64); + else + CV_Error(Error::StsNotImplemented, ""); + + auto indicesNonNegative = std::make_shared( + std::make_shared(nodes[1].dynamicCast()->node, indicesBound), + indicesBound); + + auto axis_node = std::make_shared(ov::element::i32, ov::Shape{}, &axis); + auto scatterElements = std::make_shared( + nodes[0].dynamicCast()->node, + indicesNonNegative, + nodes[2].dynamicCast()->node, + axis_node); + return Ptr(new InfEngineNgraphNode(scatterElements)); + } +#endif // HAVE_DNN_NGRAPH + private: // Attributes int axis; diff --git a/modules/dnn/src/layers/tile_layer.cpp b/modules/dnn/src/layers/tile_layer.cpp index 7b8d97089a66..3d766ca75b6a 100644 --- a/modules/dnn/src/layers/tile_layer.cpp +++ b/modules/dnn/src/layers/tile_layer.cpp @@ -4,6 +4,9 @@ #include "../precomp.hpp" #include "layers_common.hpp" +#include "../op_inf_engine.hpp" +#include "../ie_ngraph.hpp" + #include @@ -31,7 +34,8 @@ class TileLayerImpl CV_FINAL : public TileLayer virtual bool supportBackend(int backendId) CV_OVERRIDE { - return backendId == DNN_BACKEND_OPENCV; + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } virtual bool getMemoryShapes(const std::vector &inputs, @@ -108,6 +112,17 @@ class TileLayerImpl CV_FINAL : public TileLayer tmp.copyTo(out); } +#ifdef HAVE_DNN_NGRAPH + virtual Ptr initNgraph(const std::vector >& inputs, + const std::vector >& nodes) CV_OVERRIDE + { + auto repeats_node = std::make_shared(ov::element::i32, ov::Shape{repeats.size()}, repeats.data()); + auto tile = std::make_shared(nodes[0].dynamicCast()->node, repeats_node); + return Ptr(new InfEngineNgraphNode(tile)); + } +#endif // HAVE_DNN_NGRAPH + + private: std::vector repeats; }; diff --git a/modules/dnn/src/net_impl.cpp b/modules/dnn/src/net_impl.cpp index 472a73d7878f..8fee9250e3d3 100644 --- a/modules/dnn/src/net_impl.cpp +++ b/modules/dnn/src/net_impl.cpp @@ -469,8 +469,8 @@ void Net::Impl::allocateLayer(int lid, const LayersShapesMap& layersShapes) for (std::set::const_iterator i = ld.inputLayersId.begin(); i != ld.inputLayersId.end(); i++) allocateLayer(*i, layersShapes); - // bind inputs - if (ld.id == 0 && netInputLayer->supportBackend(preferableBackend)) // DataLayer + // bind inputs for DataLayer + if (ld.id == 0 && netInputLayer->supportBackend(preferableBackend)) { ninputs = netInputLayer->inputsData.size(); ld.inputBlobsWrappers.resize(ninputs); @@ -1467,6 +1467,7 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor { ld.outputBlobsWrappers[pin.oid]->setHostDirty(); } + netInputLayer->scaleFactors[pin.oid] = scalefactor; netInputLayer->means[pin.oid] = mean; netWasAllocated = netWasAllocated && oldShape; diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp index 04f1da7c7150..9b34e2d31c7f 100644 --- a/modules/dnn/src/op_inf_engine.cpp +++ b/modules/dnn/src/op_inf_engine.cpp @@ -49,6 +49,9 @@ Mat infEngineBlobToMat(const ov::Tensor& blob) switch (precision) { case ov::element::f32: type = CV_32F; break; + case ov::element::i8: type = CV_8S; break; + case ov::element::i32: type = CV_32S; break; + case ov::element::i64: type = CV_64S; break; case ov::element::u8: type = CV_8U; break; default: CV_Error(Error::StsNotImplemented, "Unsupported blob precision"); diff --git a/modules/dnn/test/test_int.cpp b/modules/dnn/test/test_int.cpp index a17b0b4bb102..64b328250a2b 100644 --- a/modules/dnn/test/test_int.cpp +++ b/modules/dnn/test/test_int.cpp @@ -31,6 +31,8 @@ TEST_P(Test_NaryEltwise_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input1(inShape, matType); cv::randu(input1, low, low + 100); Mat input2(inShape, matType); @@ -40,7 +42,7 @@ TEST_P(Test_NaryEltwise_Int, random) LayerParams lp; lp.type = "NaryEltwise"; lp.name = "testLayer"; - lp.set("operation", "sum"); + lp.set("operation", "add"); int id = net.addLayerToPrev(lp.name, lp.type, lp); net.connect(0, 1, id, 1); @@ -98,6 +100,8 @@ TEST_P(Test_Const_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input1(inShape, matType); cv::randu(input1, low, low + 100); Mat inputConst(inShape, matType); @@ -114,7 +118,7 @@ TEST_P(Test_Const_Int, random) LayerParams lp; lp.type = "NaryEltwise"; lp.name = "testLayer"; - lp.set("operation", "sum"); + lp.set("operation", "add"); int idSum = net.addLayer(lp.name, lp.type, lp); net.connect(0, 0, idSum, 0); @@ -170,6 +174,8 @@ TEST_P(Test_ScatterND_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); @@ -275,6 +281,8 @@ TEST_P(Test_Concat_Int, random) Target target = get<1>(backend_target); int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations std::vector inShape1{2, 3, 4, 5}; Mat input1(inShape1, matType); cv::randu(input1, low, low + 100); @@ -358,8 +366,13 @@ TEST_P(Test_ArgMax_Int, random) Backend backend = get<0>(backend_target); Target target = get<1>(backend_target); + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32 + std::vector inShape{5, 4, 3, 2}; int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); @@ -433,6 +446,8 @@ TEST_P(Test_Blank_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); @@ -490,6 +505,8 @@ TEST_P(Test_Expand_Int, random) std::vector inShape{2, 3, 1, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); std::vector outShape{2, 1, 4, 5}; @@ -554,6 +571,8 @@ TEST_P(Test_Permute_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); std::vector order{0, 2, 3, 1}; @@ -619,6 +638,8 @@ TEST_P(Test_GatherElements_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); @@ -692,6 +713,8 @@ TEST_P(Test_Gather_Int, random) std::vector inShape{5, 1}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); @@ -784,7 +807,9 @@ TEST_P(Test_Pad_Int, random) Target target = get<1>(backend_target); std::vector inShape{2, 3, 4, 5}; - int64_t low = 1000000; + int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); std::vector paddings{0, 0, 0, 0, 1, 0, 0, 1}; @@ -860,6 +885,8 @@ TEST_P(Test_Slice_Int, random) std::vector begin{0, 4, 0, 0}; std::vector end{1, 8, 6, 8}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inputShape, matType); cv::randu(input, low, low + 100); @@ -900,6 +927,8 @@ TEST_P(Test_Reshape_Int, random) std::vector inShape{2, 3, 4, 5}; std::vector outShape{2, 3, 2, 10}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); @@ -948,6 +977,8 @@ TEST_P(Test_Flatten_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); @@ -994,6 +1025,8 @@ TEST_P(Test_Tile_Int, random) std::vector inShape{2, 3, 4, 5}; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); std::vector repeats{1, 1, 2, 3}; @@ -1056,13 +1089,19 @@ TEST_P(Test_Reduce_Int, random) Backend backend = get<0>(backend_target); Target target = get<1>(backend_target); + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && matType == CV_64S) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32 + std::vector inShape{5, 4, 3, 2}; int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 100000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); std::vector axes{1}; Net net; + LayerParams lp; lp.type = "Reduce"; lp.name = "testLayer"; @@ -1119,8 +1158,13 @@ TEST_P(Test_Reduce_Int, two_axes) Backend backend = get<0>(backend_target); Target target = get<1>(backend_target); + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && matType == CV_64S) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32 + std::vector inShape{5, 4, 3, 2}; int64_t low = matType == CV_64S ? 100000000000000ll : 10000000; + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + low = 10000000; // Looks like OpenVINO uses int32 internal values for int64 operations Mat input(inShape, matType); cv::randu(input, low, low + 100); std::vector axes{1, 3}; diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp index 56707fe45e54..bf3bea07c8ed 100644 --- a/modules/dnn/test/test_layers.cpp +++ b/modules/dnn/test/test_layers.cpp @@ -362,6 +362,9 @@ TEST_P(Test_Caffe_layers, PReLU) // TODO: fix an unstable test case TEST_P(Test_Caffe_layers, layer_prelu_fc) { + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO + if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16); // Reference output values are in range [-0.0001, 10.3906] diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 99bb789fb7ba..5e850a21917f 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -1303,6 +1303,9 @@ TEST_P(Test_ONNX_layers, Split_EltwiseMax) TEST_P(Test_ONNX_layers, LSTM_Activations) { + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO + #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000) // IE exception: Node Block1326/lstm/reshape_0/permute was not assigned on any pointed device if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) @@ -2012,6 +2015,9 @@ TEST_P(Test_ONNX_layers, Gemm_bias) TEST_P(Test_ONNX_layers, Quantized_Convolution) { + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO + // The difference of QOperator and QDQ format: // https://onnxruntime.ai/docs/performance/quantization.html#onnx-quantization-representation-format. { @@ -2058,6 +2064,8 @@ TEST_P(Test_ONNX_layers, Quantized_Eltwise) TEST_P(Test_ONNX_layers, Quantized_Eltwise_Scalar) { + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO testONNXModels("quantized_eltwise_scalar"); } @@ -2651,23 +2659,43 @@ TEST_P(Test_ONNX_layers, CumSum) testONNXModels("cumsum_2d_dim_1"); testONNXModels("cumsum_3d_dim_2"); testONNXModels("cumsum_3d_dim_2_int32"); +} + +TEST_P(Test_ONNX_layers, CumSum_int64) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations testONNXModels("cumsum_3d_dim_2_int64"); } -TEST_P(Test_ONNX_layers, ReduceSumInt) +TEST_P(Test_ONNX_layers, ReduceSumInt64) { + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations testONNXModels("reduce_sum_int64"); } -TEST_P(Test_ONNX_layers, ScatterInt) +TEST_P(Test_ONNX_layers, ScatterInt32) { testONNXModels("scatter_int32", npy, 0, 0, false, true, 3); +} + +TEST_P(Test_ONNX_layers, ScatterInt64) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations testONNXModels("scatter_int64", npy, 0, 0, false, true, 3); } -TEST_P(Test_ONNX_layers, TileInt) +TEST_P(Test_ONNX_layers, TileInt32) { testONNXModels("tile_int32"); +} + +TEST_P(Test_ONNX_layers, TileInt64) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations testONNXModels("tile_int64"); } diff --git a/modules/dnn/test/test_tflite_importer.cpp b/modules/dnn/test/test_tflite_importer.cpp index b5f89f02cb2f..ab8163e2ac78 100644 --- a/modules/dnn/test/test_tflite_importer.cpp +++ b/modules/dnn/test/test_tflite_importer.cpp @@ -210,6 +210,9 @@ TEST_P(Test_TFLite, max_unpooling) } TEST_P(Test_TFLite, EfficientDet_int8) { + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO + if (target != DNN_TARGET_CPU || (backend != DNN_BACKEND_OPENCV && backend != DNN_BACKEND_TIMVX && backend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) { throw SkipTestException("Only OpenCV, TimVX and OpenVINO targets support INT8 on CPU");