make 'abcd op 1b11' broadcast support cuda
WanliZhong committed Apr 23, 2023
1 parent b0eddeb commit e436029
Showing 2 changed files with 20 additions and 5 deletions.
14 changes: 12 additions & 2 deletions modules/dnn/perf/perf_layer.cpp
@@ -66,8 +66,13 @@ struct Layer_NaryEltwise : public TestBaseWithParam<tuple<Backend, Target> >

         if (!isRef && backendId == DNN_BACKEND_CUDA)
         {
-            if (a_shape != b_shape)
-                throw SkipTestException("The test is skipped because inputs with different shapes are not supported.");
+            if (a_shape.size() != b_shape.size())
+                throw SkipTestException("The test is skipped because inputs with different numbers of dimensions are not supported.");
+
+            for (int i = 0; i < a_shape.size(); i++)
+                if (a_shape[i] != b_shape[i] && a_shape[i] != 1 && b_shape[i] != 1)
+                    throw SkipTestException("The test is skipped because the input shapes are not broadcastable.");
+
             if (nary_eltwise_cuda_deny_ops.find(op) != nary_eltwise_cuda_deny_ops.end())
                 throw SkipTestException("The operator '" + op + "' is skipped because it is not supported with CUDA currently.");
         }
@@ -215,6 +220,11 @@ PERF_TEST_P_(Layer_NaryEltwise, NHWC_C)
     test_layer({N, H, W, C}, {1, C}, "sum");
 }
 
+PERF_TEST_P_(Layer_NaryEltwise, NHWC_H)
+{
+    test_layer({N, H, W, C}, {1, H, 1, 1}, "sum");
+}
+
 PERF_TEST_P_(Layer_Slice, YOLOv4_tiny_1)
 {
     const int inputShape[4] = {1, 64, 104, 104};
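For reference, the rule the new test guard encodes is element-wise broadcast compatibility: two shapes of equal rank are compatible when every dimension pair either matches or has a 1 on one side, e.g. {N, H, W, C} against {1, H, 1, 1}. A minimal standalone sketch of that predicate (not part of this commit; the helper name isBroadcastable is hypothetical):

    #include <cstdio>
    #include <vector>

    // Returns true when two equal-rank shapes can be broadcast together,
    // i.e. each dimension pair matches or one side is 1 ('abcd op 1b11').
    static bool isBroadcastable(const std::vector<int>& a, const std::vector<int>& b)
    {
        if (a.size() != b.size())
            return false;                        // ranks must match
        for (size_t i = 0; i < a.size(); i++)
            if (a[i] != b[i] && a[i] != 1 && b[i] != 1)
                return false;                    // neither matches nor is 1
        return true;
    }

    int main()
    {
        std::printf("%d\n", (int)isBroadcastable({2, 3, 4, 5}, {1, 3, 1, 1})); // 1: 'abcd op 1b11'
        std::printf("%d\n", (int)isBroadcastable({2, 3, 4, 5}, {1, 2, 1, 1})); // 0: dim 1 mismatch
        return 0;
    }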
11 changes: 8 additions & 3 deletions modules/dnn/src/layers/nary_eltwise_layers.cpp
@@ -673,12 +673,17 @@ class NaryEltwiseLayerImpl CV_FINAL : public NaryEltwiseLayer
 {
     auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
-    auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
+    auto input_0_shape = inputs[0].dynamicCast<CUDABackendWrapper>()->getShape();
     for (int i = 1; i < inputs.size(); i++)
     {
-        auto from_wrapper = inputs[i].dynamicCast<CUDABackendWrapper>();
-        if (input_wrapper->getShape() != from_wrapper->getShape())
+        auto input_i_shape = inputs[i].dynamicCast<CUDABackendWrapper>()->getShape();
+        if (input_0_shape.size() != input_i_shape.size())
             return Ptr<BackendNode>();
+        // check whether the shapes can be broadcast by `eltwise_ops.cu`; otherwise return the default BackendNode
+        for (int j = 0; j < input_0_shape.size(); j++)
+            if (input_0_shape[j] != input_i_shape[j] &&
+                input_0_shape[j] != 1 && input_i_shape[j] != 1)
+                return Ptr<BackendNode>();
     }
 
     cuda4dnn::EltwiseOpType op_ = cuda4dnn::EltwiseOpType::SUM;
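Once shapes pass that check, the broadcast output takes the larger extent in each dimension (the other side is 1 wherever they differ). A minimal sketch of the resulting shape computation (an illustration, not code from this patch; broadcastShape is a hypothetical helper):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // For two broadcast-compatible, equal-rank shapes, the output extent in
    // each dimension is the larger of the two.
    static std::vector<int> broadcastShape(const std::vector<int>& a, const std::vector<int>& b)
    {
        std::vector<int> out(a.size());
        for (size_t i = 0; i < a.size(); i++)
            out[i] = std::max(a[i], b[i]);
        return out;
    }

    int main()
    {
        std::vector<int> out = broadcastShape({2, 3, 4, 5}, {1, 3, 1, 1});
        for (int d : out)
            std::printf("%d ", d);   // prints: 2 3 4 5
        return 0;
    }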
