Switch to new OpenVINO API after 2022.1 release #22957

Merged: 24 commits, Dec 23, 2022
437 changes: 291 additions & 146 deletions modules/dnn/src/ie_ngraph.cpp

Large diffs are not rendered by default.
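The ie_ngraph.cpp diff body is omitted above, but per the PR title it ports the backend's inference path from the InferenceEngine classes to the ov:: API introduced in OpenVINO 2022.1. As orientation only, and not this PR's actual code, here is a minimal sketch of the 2022.1 inference flow (function name and device string are illustrative):

#include <openvino/openvino.hpp>

// Orientation-only sketch of the OpenVINO 2022.1 "API 2.0" inference flow;
// ie_ngraph.cpp implements a far more elaborate version of this pattern.
static ov::Tensor runOnce(const std::shared_ptr<ov::Model>& model,
                          const ov::Tensor& input)
{
    ov::Core core;
    ov::CompiledModel compiled = core.compile_model(model, "CPU");
    ov::InferRequest req = compiled.create_infer_request();
    req.set_input_tensor(input);    // replaces InferRequest::SetBlob
    req.infer();                    // replaces InferRequest::Infer
    return req.get_output_tensor(); // replaces InferRequest::GetBlob
}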

20 changes: 10 additions & 10 deletions modules/dnn/src/ie_ngraph.hpp
@@ -68,7 +68,11 @@ class InfEngineNgraphNet
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>* > all_nodes;

InferenceEngine::ExecutableNetwork netExec;
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
std::map<std::string, ov::Tensor> allBlobs;
#else
InferenceEngine::BlobMap allBlobs;
#endif
std::string device_name;
bool isInit = false;

@@ -87,9 +91,7 @@ class InfEngineNgraphNet

InferenceEngine::CNNNetwork cnn;
bool hasNetOwner;
std::unordered_map<std::string, Ptr<InfEngineNgraphNode> > requestedOutputs;

std::map<std::string, InferenceEngine::TensorDesc> outputsDesc;
std::unordered_map<std::string, InfEngineNgraphNode*> requestedOutputs;
};

class InfEngineNgraphNode : public BackendNode
@@ -123,17 +125,15 @@ class NgraphBackendWrapper : public BackendWrapper
virtual void setHostDirty() CV_OVERRIDE;

Mat* host;
InferenceEngine::DataPtr dataPtr;
std::string name;
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
ov::Tensor blob;
#else
InferenceEngine::Blob::Ptr blob;
#endif
AsyncArray futureMat;
};

InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr);
InferenceEngine::DataPtr ngraphDataOutputNode(
const Ptr<BackendWrapper>& ptr,
const InferenceEngine::TensorDesc& description,
const std::string name);

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
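The header change above swaps the blob map's value type from InferenceEngine::Blob::Ptr to ov::Tensor behind a version gate. A hedged sketch, assuming OpenVINO >= 2022.1 headers, of how an entry for the new std::map<std::string, ov::Tensor> can wrap an existing cv::Mat buffer (the helper name is hypothetical, not from this PR):

#include <opencv2/core.hpp>
#include <openvino/openvino.hpp>

// Hypothetical helper: alias a dense CV_32F cv::Mat as an ov::Tensor.
// No copy is made, so the Mat must outlive the tensor.
static ov::Tensor wrapMat(const cv::Mat& m)
{
    CV_Assert(m.isContinuous() && m.type() == CV_32F);
    ov::Shape shape(m.size.p, m.size.p + m.dims);
    return ov::Tensor(ov::element::f32, shape, (void*)m.data);
}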
10 changes: 5 additions & 5 deletions modules/dnn/src/layers/concat_layer.cpp
@@ -403,25 +403,25 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
InferenceEngine::DataPtr data = ngraphDataNode(inputs[0]);
const int numDims = data->getDims().size();
const int numDims = nodes[0].dynamicCast<InfEngineNgraphNode>()->node->get_shape().size();
const int cAxis = normalize_axis(axis, numDims);
std::vector<size_t> maxDims(numDims, 0);

CV_Assert(inputs.size() == nodes.size());
ngraph::OutputVector inp_nodes;
for (int i = 0; i < nodes.size(); ++i)
{
inp_nodes.push_back(nodes[i].dynamicCast<InfEngineNgraphNode>()->node);
auto inp = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;
inp_nodes.push_back(inp);

std::vector<size_t> inpShape = ngraphDataNode(inputs[i])->getDims();
std::vector<size_t> inpShape = inp->get_shape();
for (int i = 0; i < numDims; ++i)
maxDims[i] = std::max(maxDims[i], inpShape[i]);
}
for (int i = 0; i < inp_nodes.size(); ++i)
{
bool needPadding = false;
std::vector<size_t> inpShape = ngraphDataNode(inputs[i])->getDims();
std::vector<size_t> inpShape = inp_nodes[i].get_shape();
std::vector<int64_t> begins(inpShape.size(), 0), ends(inpShape.size(), 0);
for (int j = 0; j < inpShape.size(); ++j)
{
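The concat changes above replace shape queries through the removed ngraphDataNode(...)->getDims() path with queries on the nGraph nodes themselves. A small sketch of that pattern, assuming static shapes (get_shape() is only valid for statically shaped outputs; helper name hypothetical):

#include <ngraph/ngraph.hpp>

// Sketch: read dimensions from an ngraph node rather than the retired
// InferenceEngine::DataPtr. ngraph::Shape is a std::vector<size_t>.
static std::vector<size_t> nodeDims(const std::shared_ptr<ngraph::Node>& node)
{
    const ngraph::Shape& s = node->get_shape();
    return std::vector<size_t>(s.begin(), s.end());
}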
38 changes: 38 additions & 0 deletions modules/dnn/src/layers/nary_eltwise_layers.cpp
@@ -6,6 +6,7 @@
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_cann.hpp"
#include "../ie_ngraph.hpp"

#include <opencv2/dnn/shape_utils.hpp>

@@ -104,6 +105,12 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::DIV ||
op == OPERATION::MAX || op == OPERATION::MIN;
#endif
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return (op == OPERATION::ADD ||
op == OPERATION::PROD ||
op == OPERATION::GREATER_EQUAL ||
op == OPERATION::LESS_EQUAL
);
if (op == OPERATION::MAX || op == OPERATION::MIN || op == OPERATION::SUM ||
op == OPERATION::PROD || op == OPERATION::DIV)
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
@@ -743,6 +750,37 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
CV_Assert(inputs.size());
return inputs.size() * total(outputs[0]);
}

#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(inputs.size() == 2);
auto& inp0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto& inp1 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;

if (inp0->get_element_type() != inp1->get_element_type()) {
auto dtype = preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD ?
ngraph::element::f16 : ngraph::element::f32;
if (inp0->get_element_type() != dtype)
inp0 = std::make_shared<ngraph::op::v0::Convert>(inp0, dtype);
if (inp1->get_element_type() != dtype)
inp1 = std::make_shared<ngraph::op::v0::Convert>(inp1, dtype);
}

std::shared_ptr<ngraph::Node> node;
if (op == OPERATION::ADD)
node = std::make_shared<ngraph::op::v1::Add>(inp0, inp1);
else if (op == OPERATION::PROD)
node = std::make_shared<ngraph::op::v1::Multiply>(inp0, inp1);
else if (op == OPERATION::GREATER_EQUAL)
node = std::make_shared<ngraph::op::v1::GreaterEqual>(inp0, inp1);
else if (op == OPERATION::LESS_EQUAL)
node = std::make_shared<ngraph::op::v1::LessEqual>(inp0, inp1);
else
CV_Error(Error::StsNotImplemented, "Operation is not implemented for nGraph backend");
return Ptr<BackendNode>(new InfEngineNgraphNode(node));
}
#endif
};

Ptr<NaryEltwiseLayer> NaryEltwiseLayer::create(const LayerParams& params)
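The new initNgraph above follows the standard nGraph graph-building pattern: convert operand types if they disagree, then instantiate a v1 op. For illustration, a self-contained sketch (shapes and the function name are made up, not taken from the PR) that builds one of the newly supported comparison ops:

#include <ngraph/ngraph.hpp>

// Illustrative sketch: a GreaterEqual node over two f32 parameters, the same
// v1 op the layer's initNgraph instantiates for OPERATION::GREATER_EQUAL.
static std::shared_ptr<ngraph::Function> makeGreaterEqual()
{
    auto a = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{1, 3});
    auto b = std::make_shared<ngraph::op::v0::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{1, 3});
    auto ge = std::make_shared<ngraph::op::v1::GreaterEqual>(a, b);
    return std::make_shared<ngraph::Function>(ngraph::OutputVector{ge},
                                              ngraph::ParameterVector{a, b});
}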
19 changes: 19 additions & 0 deletions modules/dnn/src/layers/resize_layer.cpp
@@ -401,6 +401,24 @@ class ResizeLayerImpl : public ResizeLayer
#else
ngraph::op::v4::Interpolate::InterpolateAttrs attrs;

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
if (interpolation == "nearest") {
attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::NEAREST;
attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL;
} else if (interpolation == "bilinear") {
attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX;
attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC;
} else {
CV_Error(Error::StsNotImplemented, format("Unsupported interpolation: %s", interpolation.c_str()));
}
attrs.shape_calculation_mode = ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES;

if (alignCorners) {
attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS;
}

attrs.nearest_mode = ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR;
#else
if (interpolation == "nearest") {
attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::nearest;
attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::half_pixel;
@@ -417,6 +435,7 @@ class ResizeLayerImpl : public ResizeLayer
}

attrs.nearest_mode = ngraph::op::v4::Interpolate::NearestMode::round_prefer_floor;
#endif // OpenVINO >= 2022.1

std::vector<int64_t> shape = {outHeight, outWidth};
auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
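The 2022.1 branch added above is needed because the v4::Interpolate enum spellings changed from lowercase to uppercase in that release. A hedged sketch of the nearest-neighbor attribute setup, with the pre-2022.1 spellings noted in comments (helper name hypothetical):

#include <ngraph/ngraph.hpp>

// Sketch of the enum rename handled by the #if above; behavior is unchanged.
static ngraph::op::v4::Interpolate::InterpolateAttrs makeNearestAttrs()
{
    ngraph::op::v4::Interpolate::InterpolateAttrs attrs;
    attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::NEAREST;  // was ::nearest
    attrs.coordinate_transformation_mode =
        ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL;  // was ::half_pixel
    attrs.shape_calculation_mode =
        ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES;  // was ::sizes
    attrs.nearest_mode =
        ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR;  // was ::round_prefer_floor
    return attrs;
}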
74 changes: 40 additions & 34 deletions modules/dnn/src/net_openvino.cpp
@@ -275,19 +275,17 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
std::string outputName = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
outputName = ld.outputBlobsWrappers.size() > 1 ? (outputName + "." + std::to_string(i)) : outputName;
dataPtr->setName(outputName);
ld.outputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = outputName;
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
std::string outputName = ld.outputBlobsWrappers.size() > 1 ? (ld.name + "." + std::to_string(i)) : ld.name;
dataPtr->setName(outputName);
ld.outputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = outputName;
}
}
}
@@ -311,26 +309,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
{
for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.inputBlobsWrappers[i]);
dataPtr->setName(netInputLayer->outNames[i]);
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
auto it = ienet.outputsDesc.find(ld.name);
if (it != ienet.outputsDesc.end())
{
const InferenceEngine::TensorDesc& descriptor = it->second;
InferenceEngine::DataPtr dataPtr = ngraphDataOutputNode(ld.outputBlobsWrappers[i], descriptor, ld.name);
dataPtr->setName(ld.name);
}
else
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
dataPtr->setName(ld.name);
}
ld.inputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = netInputLayer->outNames[i];
}
}
ienet.addBlobs(ld.inputBlobsWrappers);
Expand Down Expand Up @@ -456,10 +435,10 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
dynamicCast<NgraphBackendWrapper>();
CV_Assert(!inpWrapper.empty());
auto iter = std::find(inputNames.begin(), inputNames.end(),
inpWrapper->dataPtr->getName());
inpWrapper->name);
if (iter == inputNames.end())
{
inputNames.push_back(inpWrapper->dataPtr->getName());
inputNames.push_back(inpWrapper->name);
inputs.push_back(inpLd.outputBlobs[cons_inp]);
}
curr_pos = cons + 1;
@@ -505,7 +484,12 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
CV_LOG_DEBUG(NULL, "DNN/IE: bind output port " << lid << ":" << oid << " (" << ngraph_input_node->get_friendly_name() << ":" << ngraph_input_node->get_type_info().name << ")");

// Handle parameters from other subnets. Output port is not used in this case
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
if ((ngraph::op::is_parameter(ngraph_input_node) || ngraph::op::is_constant(ngraph_input_node)) &&
#else
if ((ngraph_input_node->is_parameter() || ngraph_input_node->is_constant()) &&
#endif

ngraph_input_node->get_output_size() == 1)
{
inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ngraph_input_node));
@@ -702,14 +686,33 @@ Net NetImplOpenVINO::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork

CV_TRACE_REGION("register_inputs");

auto ngraphFunction = ieNet.getFunction();
CV_Assert(ngraphFunction);

std::vector<String> inputsNames;
std::vector<MatShape> inp_shapes;
for (auto& it : ieNet.getInputsInfo())
for (auto& it : ngraphFunction->get_parameters())
{
inputsNames.push_back(it.first);
std::vector<size_t> dims = it.second->getTensorDesc().getDims();
inputsNames.push_back(it->get_friendly_name());
std::vector<size_t> dims = it->get_shape();
inp_shapes.push_back(std::vector<int>(dims.begin(), dims.end()));
}
// nGraph models produce output "Result" layers whose names carry a "/sink_port" suffix.
// Their inputs are the actual model outputs, so we rename each Result node after its input.
// This workaround yields output names similar to those from ieNet.getOutputsInfo()
for (int i = 0; i < ngraphFunction->get_output_size(); ++i) {
auto res = ngraphFunction->output(i);
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
const std::string& name = res.get_any_name();
#else
auto out = res.get_node()->input(0).get_source_output();
std::string name = out.get_node()->get_friendly_name();
if (out.get_node()->get_output_size() > 1)
name += "." + std::to_string(out.get_index());
#endif
if (res.get_node()->get_friendly_name() != name)
res.get_node()->set_friendly_name(name);
}

Net cvNet;
Ptr<NetImplOpenVINO> openvino_impl_ptr = makePtr<NetImplOpenVINO>();
@@ -736,17 +739,15 @@

CV_TRACE_REGION_NEXT("register_outputs");

auto ngraphFunction = ieNet.getFunction();
CV_Assert(ngraphFunction);
std::vector<std::shared_ptr<ngraph::Node>> ngraphOperations = ngraphFunction->get_ops();

for (auto& it : ieNet.getOutputsInfo())
for (auto& it : ngraphFunction->get_results())
{
CV_TRACE_REGION("output");
const auto& outputName = it.first;
const auto& outputName = it->get_friendly_name();

LayerParams lp;
int lid = cvNet.addLayer(it.first, "", lp);
int lid = cvNet.addLayer(outputName, "", lp);

LayerData& ld = openvino_impl.layers[lid];

@@ -835,10 +836,15 @@ Net openvino_readNetwork(
InferenceEngine::CNNNetwork ieNet;
try
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
ov::Tensor weights_blob(ov::element::u8, {bufferWeightsSize}, (void*)bufferWeightsPtr);
ieNet = ie.read_model(model, weights_blob);
#else
InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
InferenceEngine::Blob::CPtr weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, (uint8_t*)bufferWeightsPtr, bufferWeightsSize);

ieNet = ie.ReadNetwork(model, weights_blob);
#endif
}
catch (const std::exception& e)
{
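The last hunk above swaps InferenceEngine::Core::ReadNetwork for the tensor-based read_model overload. A minimal sketch of that 2022.1 path, mirroring the u8 weights tensor the diff constructs (function and argument names are illustrative):

#include <openvino/openvino.hpp>

// Sketch of in-memory IR loading with the 2022.1 API: the weights tensor
// aliases the caller's buffer instead of copying it, as in the diff above.
static std::shared_ptr<ov::Model> readFromMemory(const std::string& xml,
                                                 void* weightsPtr,
                                                 size_t weightsSize)
{
    ov::Core core;
    ov::Tensor weights(ov::element::u8, ov::Shape{weightsSize}, weightsPtr);
    return core.read_model(xml, weights);
}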