dnn: Support more operators in CANN backend #23401

Merged
merged 7 commits on Apr 20, 2023
6 changes: 4 additions & 2 deletions modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -347,10 +347,12 @@ CV__DNN_INLINE_NS_BEGIN
     /**
      * @brief Returns a CANN backend node
      *
-     * @param inputsWrapper input tensors of this CANN operator
+     * @param inputs input tensors of CANN operator
+     * @param outputs output tensors of CANN operator
      * @param nodes nodes of input tensors
      */
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes);

     /**
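For orientation, a minimal hedged sketch of how a layer implementation might override the revised interface, following the pattern of the hunks below; ge::op::Identity and the pass-through wiring are assumptions for illustration, and only helper calls that appear elsewhere in this PR are used:

#ifdef HAVE_CANN
    // Hedged sketch, not from the PR: a pass-through layer wired into the CANN graph.
    // ge::op::Identity is assumed; substitute the operator the layer actually maps to.
    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        // inputs/outputs describe the tensors; nodes hold the CANN operators
        // already built for each input of this layer.
        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();

        auto op = std::make_shared<ge::op::Identity>(name);
        op->set_input_x_by_name(*op_x, x->name.c_str()); // wire the upstream operator
        op->update_input_desc_x(*x->getTensorDesc());

        auto y_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*y_desc);               // describe the output tensor

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN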
3 changes: 2 additions & 1 deletion modules/dnn/src/layer.cpp
@@ -84,7 +84,8 @@ Ptr<BackendNode> Layer::initTimVX(void* timVxInfo,
     return Ptr<BackendNode>();
 }

-Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                 const std::vector<Ptr<BackendWrapper> > &outputs,
                                  const std::vector<Ptr<BackendNode> >& nodes)
 {
     CV_Error(Error::StsNotImplemented, "CANN pipeline of " + type + " layers is not defined.");
5 changes: 3 additions & 2 deletions modules/dnn/src/layers/batch_norm_layer.cpp
@@ -392,13 +392,14 @@ class BatchNormLayerImpl CV_FINAL : public BatchNormLayer
 #endif // HAVE_HALIDE

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         CV_Assert(nodes.size() == 1);
         CV_Assert(blobs.size() == 4); // must have scale, offset, mean and variance

-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto channel = x->host->size[1];

         // create operator
5 changes: 3 additions & 2 deletions modules/dnn/src/layers/blank_layer.cpp
@@ -121,10 +121,11 @@ class BlankLayerImpl CV_FINAL : public BlankLayer
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x_desc = x->getTensorDesc();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
9 changes: 5 additions & 4 deletions modules/dnn/src/layers/concat_layer.cpp
@@ -367,24 +367,25 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(inputsWrapper.size() == nodes.size());
+        CV_Assert(inputs.size() == nodes.size());

         // create operator
         auto op = std::make_shared<ge::op::ConcatD>(name);

         // set attributes
-        int N = inputsWrapper.size();
+        int N = inputs.size();
         op->set_attr_concat_dim(axis);
         op->set_attr_N(N);

         // set inputs : x (dynamic)
         op->create_dynamic_input_x(N);
         for (int i = 0; i < N; i++)
         {
-            auto x_i = inputsWrapper[i].dynamicCast<CannBackendWrapper>();
+            auto x_i = inputs[i].dynamicCast<CannBackendWrapper>();
             auto x_i_desc = x_i->getTensorDesc();
             auto op_x_i = nodes[i].dynamicCast<CannBackendNode>()->getOp();
             op->set_dynamic_input_x(i, *op_x_i, x_i->name.c_str());
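A brief hedged aside on the dynamic-input pattern above: ConcatD takes a variable number of inputs, so the ports are declared first with create_dynamic_input_x and then wired one by one. A minimal two-input illustration along the channel axis, with hypothetical upstream operators op_a and op_b:

    // Hypothetical two-input concat (axis = 1), mirroring the loop in the hunk above.
    auto concat = std::make_shared<ge::op::ConcatD>("concat_example");
    concat->set_attr_concat_dim(1);             // OpenCV's `axis`
    concat->set_attr_N(2);                      // number of inputs
    concat->create_dynamic_input_x(2);          // declare two input ports
    concat->set_dynamic_input_x(0, *op_a, "a"); // op_a/op_b and output names "a"/"b" are assumed
    concat->set_dynamic_input_x(1, *op_b, "b");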
3 changes: 2 additions & 1 deletion modules/dnn/src/layers/const_layer.cpp
@@ -84,7 +84,8 @@ class ConstLayerImpl CV_FINAL : public ConstLayer
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto mat_shape = shape(blobs[0]);
83 changes: 79 additions & 4 deletions modules/dnn/src/layers/convolution_layer.cpp
@@ -782,16 +782,17 @@ class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         CV_Assert(!blobs.empty());
-        CV_Assert(inputsWrapper.size() == 1);
+        CV_Assert(inputs.size() == 1);
         CV_Assert(nodes.size() == 1);

         bool has_bias = hasBias() || fusedBias;

-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         const auto shape_x = x->host->size; // [b, c, h, w]
         const int filter_out_channel = blobs[0].size[1];
         const int groups = shape_x[1] / filter_out_channel;
@@ -1611,7 +1612,8 @@ class DeConvolutionLayerImpl
 #endif // HAVE_INF_ENGINE
        {
            return backendId == DNN_BACKEND_CUDA ||
-                  (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE));
+                  (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE)) ||
+                  (kernel_size.size() == 2 && backendId == DNN_BACKEND_CANN);
        }
    }

@@ -2272,6 +2274,79 @@ class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
         return Ptr<BackendNode>();
     }

+#ifdef HAVE_CANN
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
+                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        CV_Assert(!blobs.empty());
+        CV_Assert(inputs.size() == 1);
+        CV_Assert(nodes.size() == 1);
+
+        bool has_bias = hasBias() || fusedBias;
+
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
+        auto y = outputs[0].dynamicCast<CannBackendWrapper>();
+        const auto shape_x = x->host->size; // [N, C, H, W]
+        const auto shape_y = y->host->size; // [N, C, H, W]
+        const int filter_out_channel = blobs[0].size[0];
+        const int groups = shape_x[1] / filter_out_channel;
+
+        // create operator
+        auto op = std::make_shared<ge::op::Conv2DTransposeD>(name);
+
+        // set attributes
+        op->set_attr_input_size(
+            ge::Operator::OpListInt({(int64_t)shape_y[0],
+                                     (int64_t)shape_y[1],
+                                     (int64_t)shape_y[2],
+                                     (int64_t)shape_y[3],})
+        );
+        op->set_attr_strides(
+            ge::Operator::OpListInt({1, 1, (int64_t)strides[0], (int64_t)strides[1]})
+        );
+        op->set_attr_pads(ge::Operator::OpListInt(
+            {(int64_t)pads_begin[1], (int64_t)pads_end[1], (int64_t)pads_begin[0], (int64_t)pads_end[0]}
+        ));
+        op->set_attr_dilations(ge::Operator::OpListInt(
+            {1, 1, (int64_t)dilations[0], (int64_t)dilations[1]}
+        ));
+        op->set_attr_groups(groups);
+        op->set_attr_data_format("NCHW");
+        op->set_attr_output_padding(
+            ge::Operator::OpListInt({0, 0, (int64_t)adjust_pads[0], (int64_t)adjust_pads[1]}) // adjust_pads: [height, width]
+        );
+
+        // set inputs
+        // set inputs : x
+        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        op->set_input_x_by_name(*op_x, x->name.c_str());
+        auto desc_x = x->getTensorDesc();
+        op->update_input_desc_x(*desc_x);
+        // set inputs : weight
+        const Mat& mat_w = blobs[0];
+        auto op_const_w = std::make_shared<CannConstOp>(mat_w.data, mat_w.type(), shape(mat_w), cv::format("%s_w", name.c_str()));
+        op->set_input_filter(*(op_const_w->getOp()));
+        op->update_input_desc_filter(*(op_const_w->getTensorDesc()));
+        // set inputs : bias
+        if (has_bias)
+        {
+            int out_channel = blobs[0].size[0];
+            const Mat& mat_b = blobs[1];
+
+            std::vector<int> shape_b{out_channel};
+            auto op_const_b = std::make_shared<CannConstOp>(mat_b.data, mat_b.type(), shape_b, cv::format("%s_b", name.c_str()));
+            op->set_input_bias(*(op_const_b->getOp()));
+            op->update_input_desc_bias(*(op_const_b->getTensorDesc()));
+        }
+
+        // set outputs
+        auto desc_output = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*desc_output);
+
+        return Ptr<BackendNode>(new CannBackendNode(op));
+    }
+#endif // HAVE_CANN
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
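A hedged note on the attribute layout in the Conv2DTransposeD block above: with set_attr_data_format("NCHW"), the four-element lists follow N, C, H, W order, so strides and dilations carry leading 1s for the batch and channel dimensions. A worked illustration with assumed numbers (input x of shape [1, 64, 32, 32], weight blob [64, 32, 3, 3], stride 2, padding 1, dilation 1, output padding 1):

    // filter_out_channel = blobs[0].size[0] = 64, groups = shape_x[1] / 64 = 1
    // set_attr_strides        -> {1, 1, 2, 2}
    // set_attr_dilations      -> {1, 1, 1, 1}
    // set_attr_pads           -> {1, 1, 1, 1}   (built from pads_begin/pads_end as in the hunk)
    // set_attr_output_padding -> {0, 0, 1, 1}
    // set_attr_input_size     -> shape_y, the full NCHW shape of the output tensor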