Fix HETERO:GPU,CPU plugin issues with unsupported layer
dkurt committed Dec 23, 2022
1 parent bd2ab10 commit 4f3e8d8
Showing 2 changed files with 37 additions and 1 deletion.
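
The change adds nGraph (OpenVINO) support for a few element-wise operations in the NaryEltwise layer. Below is a minimal, hypothetical sketch of how this code path could be exercised from the OpenCV DNN API; the ONNX file name and input names are placeholders, and the OpenCL target is chosen on the assumption that layers the GPU plugin cannot handle fall back to the CPU, which is the HETERO:GPU,CPU scenario the commit title refers to.

#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>

int main()
{
    // Hypothetical model and input names; any ONNX graph with a GreaterEqual /
    // LessEqual / Add / Mul node between two inputs would do.
    cv::dnn::Net net = cv::dnn::readNetFromONNX("greater_equal.onnx");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);  // GPU target; unsupported layers may fall back to CPU

    cv::Mat a(1, 4, CV_32F), b(1, 4, CV_32F);
    cv::randu(a, cv::Scalar(0), cv::Scalar(1));
    cv::randu(b, cv::Scalar(0), cv::Scalar(1));
    net.setInput(a, "A");
    net.setInput(b, "B");
    cv::Mat out = net.forward();
    return 0;
}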
36 changes: 36 additions & 0 deletions modules/dnn/src/layers/nary_eltwise_layers.cpp
@@ -6,6 +6,7 @@
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_cann.hpp"
#include "../ie_ngraph.hpp"

#include <opencv2/dnn/shape_utils.hpp>

@@ -104,6 +105,12 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
        return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::DIV ||
               op == OPERATION::MAX || op == OPERATION::MIN;
#endif
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return (op == OPERATION::ADD ||
                    op == OPERATION::PROD ||
                    op == OPERATION::GREATER_EQUAL ||
                    op == OPERATION::LESS_EQUAL
            );
        if (op == OPERATION::MAX || op == OPERATION::MIN || op == OPERATION::SUM ||
            op == OPERATION::PROD || op == OPERATION::DIV)
            return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
@@ -743,6 +750,35 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
        CV_Assert(inputs.size());
        return inputs.size() * total(outputs[0]);
    }

    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        CV_Assert(inputs.size() == 2);
        auto& inp0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
        auto& inp1 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;

        if (inp0->get_element_type() != inp1->get_element_type()) {
            auto dtype = preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD ?
                         ngraph::element::f16 : ngraph::element::f32;
            if (inp0->get_element_type() != dtype)
                inp0 = std::make_shared<ngraph::op::v0::Convert>(inp0, dtype);
            if (inp1->get_element_type() != dtype)
                inp1 = std::make_shared<ngraph::op::v0::Convert>(inp1, dtype);
        }

        std::shared_ptr<ngraph::Node> node;
        if (op == OPERATION::ADD)
            node = std::make_shared<ngraph::op::v1::Add>(inp0, inp1);
        else if (op == OPERATION::PROD)
            node = std::make_shared<ngraph::op::v1::Multiply>(inp0, inp1);
        else if (op == OPERATION::GREATER_EQUAL)
            node = std::make_shared<ngraph::op::v1::GreaterEqual>(inp0, inp1);
        else if (op == OPERATION::LESS_EQUAL)
            node = std::make_shared<ngraph::op::v1::LessEqual>(inp0, inp1);
        else
            CV_Error(Error::StsNotImplemented, "Operation is not implemented for nGraph backend");
        return Ptr<BackendNode>(new InfEngineNgraphNode(node));
    }
};

Ptr<NaryEltwiseLayer> NaryEltwiseLayer::create(const LayerParams& params)
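
For reference, the initNgraph() method above builds the layer out of plain nGraph nodes. The following standalone sketch shows roughly the graph it would produce for OPERATION::GREATER_EQUAL; the shapes and the deliberate element-type mismatch (aligned with Convert, as in the patch) are illustrative assumptions, not code from this repository.

#include <memory>
#include <ngraph/ngraph.hpp>

std::shared_ptr<ngraph::Function> buildGreaterEqualGraph()
{
    // Two inputs with different element types, mimicking the case the patch
    // normalizes before creating the comparison node.
    auto a = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{1, 4});
    auto b = std::make_shared<ngraph::op::Parameter>(ngraph::element::f16, ngraph::Shape{1, 4});

    // Align element types with Convert, then apply the element-wise comparison.
    auto b32 = std::make_shared<ngraph::op::v0::Convert>(b, ngraph::element::f32);
    auto cmp = std::make_shared<ngraph::op::v1::GreaterEqual>(a, b32);

    return std::make_shared<ngraph::Function>(cmp, ngraph::ParameterVector{a, b});
}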
2 changes: 1 addition & 1 deletion modules/dnn/test/test_backends.cpp
@@ -531,7 +531,7 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        l1 = 0.4;
-       lInf = 7.45;
+       lInf = 7.46;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
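
The l1 and lInf values are per-test error tolerances against a reference output, roughly the mean and the maximum absolute difference; the FP16/MYRIAD ceiling is nudged from 7.45 to 7.46 here. A sketch of an equivalent check is below (the real test relies on a normAssert() helper in the dnn test suite); ref and out are assumed to be same-sized float Mats.

#include <opencv2/core.hpp>

// Assumed helper mirroring the tolerance check; ref and out must have the
// same size and type (e.g. CV_32F).
static bool withinTolerance(const cv::Mat& ref, const cv::Mat& out,
                            double maxL1 = 0.4, double maxLInf = 7.46)
{
    double l1   = cv::norm(ref, out, cv::NORM_L1) / ref.total();  // mean absolute difference
    double lInf = cv::norm(ref, out, cv::NORM_INF);               // largest absolute difference
    return l1 <= maxL1 && lInf <= maxLInf;
}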
