diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index dca20adb27c2..5e5e295de329 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -347,10 +347,12 @@ CV__DNN_INLINE_NS_BEGIN
         /**
          * @brief Returns a CANN backend node
          *
-         * @param inputsWrapper input tensors of this CANN operator
+         * @param inputs input tensors of CANN operator
+         * @param outputs output tensors of CANN operator
          * @param nodes nodes of input tensors
          */
-        virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+        virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                          const std::vector<Ptr<BackendWrapper> > &outputs,
                                           const std::vector<Ptr<BackendNode> >& nodes);
 
         /**
diff --git a/modules/dnn/src/layer.cpp b/modules/dnn/src/layer.cpp
index 730fae4cb804..49ede4fa2521 100644
--- a/modules/dnn/src/layer.cpp
+++ b/modules/dnn/src/layer.cpp
@@ -84,7 +84,8 @@ Ptr<BackendNode> Layer::initTimVX(void* timVxInfo,
     return Ptr<BackendNode>();
 }
 
-Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                 const std::vector<Ptr<BackendWrapper> > &outputs,
                                  const std::vector<Ptr<BackendNode> >& nodes)
 {
     CV_Error(Error::StsNotImplemented, "CANN pipeline of " + type + " layers is not defined.");
diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index 40d80542512b..b90ee934ef51 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -392,13 +392,14 @@ class BatchNormLayerImpl CV_FINAL : public BatchNormLayer
 #endif // HAVE_HALIDE
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         CV_Assert(nodes.size() == 1);
         CV_Assert(blobs.size() == 4); // must have scale, offset, mean and variance
 
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto channel = x->host->size[1];
 
         // create operator
diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index 0aa65f62a33d..3095e2d6c907 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -121,10 +121,11 @@ class BlankLayerImpl CV_FINAL : public BlankLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x_desc = x->getTensorDesc();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
diff --git a/modules/dnn/src/layers/concat_layer.cpp b/modules/dnn/src/layers/concat_layer.cpp
index 44ccfc02e878..f8f5152e33d2 100644
--- a/modules/dnn/src/layers/concat_layer.cpp
+++ b/modules/dnn/src/layers/concat_layer.cpp
@@ -367,16 +367,17 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(inputsWrapper.size() == nodes.size());
+        CV_Assert(inputs.size() == nodes.size());
 
         // create operator
         auto op = std::make_shared(name);
 
         // set attributes
-        int N = inputsWrapper.size();
+        int N = inputs.size();
         op->set_attr_concat_dim(axis);
         op->set_attr_N(N);
 
@@ -384,7 +385,7 @@ class ConcatLayerImpl CV_FINAL : public ConcatLayer
         op->create_dynamic_input_x(N);
         for (int i = 0; i < N; i++)
         {
-            auto x_i = inputsWrapper[i].dynamicCast<CannBackendWrapper>();
+            auto x_i = inputs[i].dynamicCast<CannBackendWrapper>();
             auto x_i_desc = x_i->getTensorDesc();
             auto op_x_i = nodes[i].dynamicCast<CannBackendNode>()->getOp();
             op->set_dynamic_input_x(i, *op_x_i, x_i->name.c_str());
diff --git a/modules/dnn/src/layers/const_layer.cpp b/modules/dnn/src/layers/const_layer.cpp
index 58cccbd55201..34f958782514 100644
--- a/modules/dnn/src/layers/const_layer.cpp
+++ b/modules/dnn/src/layers/const_layer.cpp
@@ -84,7 +84,8 @@ class ConstLayerImpl CV_FINAL : public ConstLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto mat_shape = shape(blobs[0]);
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 0dcff60072a4..fc0120cdb85c 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -782,16 +782,17 @@ class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         CV_Assert(!blobs.empty());
-        CV_Assert(inputsWrapper.size() == 1);
+        CV_Assert(inputs.size() == 1);
         CV_Assert(nodes.size() == 1);
 
         bool has_bias = hasBias() || fusedBias;
 
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         const auto shape_x = x->host->size; // [b, c, h, w]
         const int filter_out_channel = blobs[0].size[1];
         const int groups = shape_x[1] / filter_out_channel;
@@ -1611,7 +1612,8 @@ class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
 #endif // HAVE_INF_ENGINE
         {
             return backendId == DNN_BACKEND_CUDA ||
-                   (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE));
+                   (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE)) ||
+                   (kernel_size.size() == 2 && backendId == DNN_BACKEND_CANN);
         }
     }
 
@@ -2272,6 +2274,79 @@ class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
         return Ptr<BackendNode>();
     }
 
+#ifdef HAVE_CANN
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
+                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        CV_Assert(!blobs.empty());
+        CV_Assert(inputs.size() == 1);
+        CV_Assert(nodes.size() == 1);
+
+        bool has_bias = hasBias() || fusedBias;
+
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
+        auto y = outputs[0].dynamicCast<CannBackendWrapper>();
+        const auto shape_x = x->host->size; // [N, C, H, W]
+        const auto shape_y = y->host->size; // [N, C, H, W]
+        const int filter_out_channel = blobs[0].size[0];
+        const int groups = shape_x[1] / filter_out_channel;
+
+        // create operator
+        auto op = std::make_shared<ge::op::Conv2DTransposeD>(name);
+
+        // set attributes
+        op->set_attr_input_size(
+            ge::Operator::OpListInt({(int64_t)shape_y[0],
+                                     (int64_t)shape_y[1],
+                                     (int64_t)shape_y[2],
+                                     (int64_t)shape_y[3],})
+        );
+        op->set_attr_strides(
+            ge::Operator::OpListInt({1, 1, (int64_t)strides[0], (int64_t)strides[1]})
+        );
+        op->set_attr_pads(ge::Operator::OpListInt(
+            {(int64_t)pads_begin[1], (int64_t)pads_end[1], (int64_t)pads_begin[0], (int64_t)pads_end[0]}
+        ));
+        op->set_attr_dilations(ge::Operator::OpListInt(
+            {1, 1, (int64_t)dilations[0], (int64_t)dilations[1]}
+        ));
+        op->set_attr_groups(groups);
op->set_attr_data_format("NCHW"); + op->set_attr_output_padding( + ge::Operator::OpListInt({0, 0, (int64_t)adjust_pads[0], (int64_t)adjust_pads[1]}) // adjust_pads: [height, width] + ); + + // set inputs + // set inputs : x + auto op_x = nodes[0].dynamicCast()->getOp(); + op->set_input_x_by_name(*op_x, x->name.c_str()); + auto desc_x = x->getTensorDesc(); + op->update_input_desc_x(*desc_x); + // set inputs : weight + const Mat& mat_w = blobs[0]; + auto op_const_w = std::make_shared(mat_w.data, mat_w.type(), shape(mat_w), cv::format("%s_w", name.c_str())); + op->set_input_filter(*(op_const_w->getOp())); + op->update_input_desc_filter(*(op_const_w->getTensorDesc())); + // set inputs : bias + if (has_bias) + { + int out_channel = blobs[0].size[0]; + const Mat& mat_b = blobs[1]; + + std::vector shape_b{out_channel}; + auto op_const_b = std::make_shared(mat_b.data, mat_b.type(), shape_b, cv::format("%s_b", name.c_str())); + op->set_input_bias(*(op_const_b->getOp())); + op->update_input_desc_bias(*(op_const_b->getTensorDesc())); + } + + // set outputs + auto desc_output = std::make_shared(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT); + op->update_output_desc_y(*desc_output); + + return Ptr(new CannBackendNode(op)); + } +#endif // HAVE_CANN #ifdef HAVE_DNN_NGRAPH virtual Ptr initNgraph(const std::vector > &inputs, diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index bb60410038d8..c7ce703e3e18 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -188,10 +188,11 @@ class ElementWiseLayer : public Func::Layer } #ifdef HAVE_CANN - virtual Ptr initCann(const std::vector > &inputsWrapper, + virtual Ptr initCann(const std::vector > &inputs, + const std::vector > &outputs, const std::vector >& nodes) CV_OVERRIDE { - return func.initCannOp(Layer::name, inputsWrapper, nodes); + return func.initCannOp(Layer::name, inputs, nodes); } #endif // HAVE_CANN @@ -461,10 +462,10 @@ struct ReLUFunctor : public BaseFunctor #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, - const std::vector > &inputsWrapper, + const std::vector > &inputs, const std::vector >& nodes) { - auto x = inputsWrapper[0].dynamicCast(); + auto x = inputs[0].dynamicCast(); auto op_x = nodes[0].dynamicCast()->getOp(); auto x_desc = x->getTensorDesc(); @@ -655,10 +656,10 @@ struct ReLU6Functor : public BaseFunctor #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, - const std::vector > &inputsWrapper, + const std::vector > &inputs, const std::vector >& nodes) { - auto x = inputsWrapper[0].dynamicCast(); + auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); @@ -808,7 +809,7 @@ struct BaseDefaultFunctor : public BaseFunctor #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, - const std::vector > &inputsWrapper, + const std::vector > &inputs, const std::vector >& nodes) { CV_Error(Error::StsNotImplemented, ""); @@ -930,10 +931,10 @@ struct TanHFunctor : public BaseDefaultFunctor #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, - const std::vector > &inputsWrapper, + const std::vector > &inputs, const std::vector >& nodes) { - auto x = inputsWrapper[0].dynamicCast(); + auto x = inputs[0].dynamicCast(); auto op = std::make_shared(name); @@ -997,10 +998,10 @@ struct SwishFunctor : public BaseDefaultFunctor #ifdef HAVE_CANN Ptr initCannOp(const std::string& name, - const std::vector > &inputsWrapper, + const std::vector > &inputs, const std::vector >& nodes) { - auto x = 
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -1075,10 +1076,10 @@ struct MishFunctor : public BaseDefaultFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -1151,10 +1152,10 @@ struct SigmoidFunctor : public BaseDefaultFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -1229,10 +1230,10 @@ struct ELUFunctor : public BaseDefaultFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -1301,10 +1302,10 @@ struct AbsValFunctor : public BaseDefaultFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -1363,10 +1364,10 @@ struct BNLLFunctor : public BaseDefaultFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
    {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -1420,10 +1421,10 @@ struct CeilFunctor : public BaseDefaultFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -1479,10 +1480,10 @@ struct FloorFunctor : public BaseDefaultFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         auto op = std::make_shared(name);
 
@@ -2334,7 +2335,7 @@ struct PowerFunctor : public BaseFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
         CV_Error(Error::StsNotImplemented, "");
@@ -2498,7 +2499,8 @@ struct ChannelsPReLUFunctor : public BaseFunctor
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_HALIDE;
+               backendId == DNN_BACKEND_HALIDE ||
+               backendId == DNN_BACKEND_CANN;
     }
 
     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@@ -2590,10 +2592,10 @@ struct ChannelsPReLUFunctor : public BaseFunctor
 
 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
    {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto x_desc = x->getTensorDesc();
 
diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp
index 5052bd182364..8ed1b799eb95 100644
--- a/modules/dnn/src/layers/eltwise_layer.cpp
+++ b/modules/dnn/src/layers/eltwise_layer.cpp
@@ -849,17 +849,18 @@ class EltwiseLayerImpl CV_FINAL : public EltwiseLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(inputsWrapper.size() == 2);
+        CV_Assert(inputs.size() == 2);
         CV_Assert(nodes.size() == 2);
 
         auto op_x1 = nodes[0].dynamicCast<CannBackendNode>()->getOp();
-        auto x1 = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x1 = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x1_desc = x1->getTensorDesc();
         auto op_x2 = nodes[1].dynamicCast<CannBackendNode>()->getOp();
-        auto x2 = inputsWrapper[1].dynamicCast<CannBackendWrapper>();
+        auto x2 = inputs[1].dynamicCast<CannBackendWrapper>();
         auto x2_desc = x2->getTensorDesc();
 
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
diff --git a/modules/dnn/src/layers/flatten_layer.cpp b/modules/dnn/src/layers/flatten_layer.cpp
index 226863fd3e4f..6a502af7e927 100644
--- a/modules/dnn/src/layers/flatten_layer.cpp
+++ b/modules/dnn/src/layers/flatten_layer.cpp
@@ -176,10 +176,11 @@ class FlattenLayerImpl CV_FINAL : public FlattenLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x_desc = x->getTensorDesc();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index d33ebb65155c..556c5175949f 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -662,10 +662,11 @@ class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
-        auto x1 = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x1 = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x1_desc = x1->getTensorDesc();
         auto op_x1 = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
@@ -689,7 +690,7 @@ class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
         else
         {
             // A and B are variable inputs; non-const bias is not considered
-            CV_Assert(inputsWrapper.size() == 2);
+            CV_Assert(inputs.size() == 2);
             CV_Assert(nodes.size() == 2);
 
             // set attributes
@@ -698,7 +699,7 @@ class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
 
             // set inputs : x2 (weight)
             auto op_x2 = nodes[1].dynamicCast<CannBackendNode>()->getOp();
-            auto x2_desc = inputsWrapper[1].dynamicCast<CannBackendWrapper>()->getTensorDesc();
+            auto x2_desc = inputs[1].dynamicCast<CannBackendWrapper>()->getTensorDesc();
             op->set_input_x2_by_name(*op_x2, "y");
             op->update_input_desc_x2(*x2_desc);
         }
diff --git a/modules/dnn/src/layers/lrn_layer.cpp b/modules/dnn/src/layers/lrn_layer.cpp
index 95599afdc111..728a597849c4 100644
--- a/modules/dnn/src/layers/lrn_layer.cpp
+++ b/modules/dnn/src/layers/lrn_layer.cpp
@@ -445,10 +445,11 @@ class LRNLayerImpl CV_FINAL : public LRNLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         // create operator
         auto op = std::make_shared(name);
diff --git a/modules/dnn/src/layers/nary_eltwise_layers.cpp b/modules/dnn/src/layers/nary_eltwise_layers.cpp
index 280920af359e..2c46b1165b82 100644
--- a/modules/dnn/src/layers/nary_eltwise_layers.cpp
+++ b/modules/dnn/src/layers/nary_eltwise_layers.cpp
@@ -102,7 +102,7 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
     {
 #ifdef HAVE_CANN
         if (backendId == DNN_BACKEND_CANN)
-            return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::DIV ||
+            return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::SUB || op == OPERATION::DIV ||
                    op == OPERATION::MAX || op == OPERATION::MIN;
 #endif
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
@@ -709,22 +709,23 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
 #endif
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(inputsWrapper.size() == 2);
+        CV_Assert(inputs.size() == 2);
         CV_Assert(nodes.size() == 2);
 
         auto op_x1 = nodes[0].dynamicCast<CannBackendNode>()->getOp();
-        auto x1 = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x1 = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x1_desc = x1->getTensorDesc();
         auto op_x2 = nodes[1].dynamicCast<CannBackendNode>()->getOp();
-        auto x2 = inputsWrapper[1].dynamicCast<CannBackendWrapper>();
+        auto x2 = inputs[1].dynamicCast<CannBackendWrapper>();
         auto x2_desc = x2->getTensorDesc();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
 
         std::shared_ptr<ge::Operator> eltwise_operator = nullptr;
-        // add, mul, div, max, min
+        // add, mul, sub, div, max, min
         switch (op)
         {
 #define BUILD_CANN_ELTWISE_OP(op_type, class_name, op_name) \
@@ -740,6 +741,7 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
             } break;
             BUILD_CANN_ELTWISE_OP(OPERATION::ADD, Add, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::PROD, Mul, name);
+            BUILD_CANN_ELTWISE_OP(OPERATION::SUB, Sub, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::DIV, Xdivy, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::MAX, Maximum, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::MIN, Minimum, name);
diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp
index 9cbac98e74e7..f66d44b222a6 100644
--- a/modules/dnn/src/layers/padding_layer.cpp
+++ b/modules/dnn/src/layers/padding_layer.cpp
@@ -222,10 +222,11 @@ class PaddingLayerImpl CV_FINAL : public PaddingLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         // create operator
         auto op = std::make_shared(name);
diff --git a/modules/dnn/src/layers/permute_layer.cpp b/modules/dnn/src/layers/permute_layer.cpp
index c2b20af35082..3fab9802647f 100644
--- a/modules/dnn/src/layers/permute_layer.cpp
+++ b/modules/dnn/src/layers/permute_layer.cpp
@@ -441,10 +441,11 @@ class PermuteLayerImpl CV_FINAL : public PermuteLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         // create operator
         auto op = std::make_shared(name);
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index 415887b4118a..ff53d4d11484 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -548,10 +548,11 @@ class PoolingLayerImpl CV_FINAL : public PoolingLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto x_desc = x->getTensorDesc();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp
index e433ee1787bd..a72236c47268 100644
--- a/modules/dnn/src/layers/reshape_layer.cpp
+++ b/modules/dnn/src/layers/reshape_layer.cpp
@@ -327,10 +327,11 @@ class ReshapeLayerImpl CV_FINAL : public ReshapeLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         // create operator
         auto op = std::make_shared(name);
diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp
index 8f21266e5725..02ac29de8d6e 100644
--- a/modules/dnn/src/layers/resize_layer.cpp
+++ b/modules/dnn/src/layers/resize_layer.cpp
@@ -312,10 +312,11 @@ class ResizeLayerImpl : public ResizeLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x_desc = x->getTensorDesc();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_y_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
@@ -334,7 +335,8 @@ class ResizeLayerImpl : public ResizeLayer
         op->update_input_desc_x(*x_desc);
         // set inputs : size
         std::vector<int> shape_of_size_mat{2};
-        Mat size_mat(2, 1, CV_32S, Scalar(outHeight, outWidth));
+        std::vector<int> size_vec{outHeight, outWidth};
+        Mat size_mat(shape_of_size_mat, CV_32S, size_vec.data());
         auto op_const_size = std::make_shared<CannConstOp>(size_mat.data, size_mat.type(), shape_of_size_mat, cv::format("%s_size", name.c_str()));
         op->set_input_size(*(op_const_size->getOp()));
         op->update_input_desc_size(*(op_const_size->getTensorDesc()));
diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp
index 2bf86995f6ef..d3675e23a536 100644
--- a/modules/dnn/src/layers/slice_layer.cpp
+++ b/modules/dnn/src/layers/slice_layer.cpp
@@ -634,11 +634,12 @@ class SliceLayerImpl : public SliceLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         bool isSplit = sliceRanges.size() > 1;
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         if (isSplit)
         {
diff --git a/modules/dnn/src/layers/softmax_layer.cpp b/modules/dnn/src/layers/softmax_layer.cpp
index 57afe4dbfa33..4f1bcf59a47c 100644
--- a/modules/dnn/src/layers/softmax_layer.cpp
+++ b/modules/dnn/src/layers/softmax_layer.cpp
@@ -365,10 +365,11 @@ class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
     }
 
 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
 
         // create operator
         auto op = std::make_shared(name);
diff --git a/modules/dnn/src/net_cann.cpp b/modules/dnn/src/net_cann.cpp
index 0b73427e404f..a3eb52200f44 100644
--- a/modules/dnn/src/net_cann.cpp
+++ b/modules/dnn/src/net_cann.cpp
@@ -117,7 +117,7 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         if (ld.id != 0 && !layer->supportBackend(preferableBackend))
         {
             newWasSupported = false;
-            CV_LOG_INFO(NULL, "DNN/CANN: layer (name=" << ld.name << ", type=" << ld.type << ") is not supported by CANN backend. Going back to CPU backend");
+            CV_LOG_ONCE_WARNING(NULL, "DNN/CANN: layer (name=" << ld.name << ", type=" << ld.type << ") is not supported by CANN backend. Going back to default backend on CPU target");
         }
     }
 }
@@ -202,7 +202,7 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         }
 
         CV_LOG_INFO(NULL, "DNN/CANN: converting layer " << ld.name << "@" << ld.type << "@" << ld.id << " to CANN operator");
-        auto backendNode = layer->initCann(ld.inputBlobsWrappers, layerInputNodes); // it's ok if ld.name is empty
+        auto backendNode = layer->initCann(ld.inputBlobsWrappers, ld.outputBlobsWrappers, layerInputNodes); // it's ok if ld.name is empty
 
         // collect outputs
         bool isOutputNode = ld.consumers.size() == 0 ? true : false;