Merge pull request #24378 from fengyuentau:instance_norm
dnn onnx: add instance norm layer #24378

Resolves #24377
Relates to #24092 (comment)

| Input shape | multi-thread | single-thread |
| - | - | - |
| x: [2, 64, 180, 240] | 3.95 ms | 11.12 ms |
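
For reference, ONNX `InstanceNormalization` normalizes each `(n, c)` slice of `x` over its spatial dimensions and then applies a per-channel affine transform:

```
mean[n, c]   = mean(x[n, c, ...])
var[n, c]    = mean((x[n, c, ...] - mean[n, c])^2)
y[n, c, ...] = scale[c] * (x[n, c, ...] - mean[n, c]) / sqrt(var[n, c] + epsilon) + bias[c]
```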

Todo:

- [x] speed up by multi-threading
- [x] add perf
- [x] add backend: OpenVINO
- [x] add backend: CUDA
- [x] add backend: OpenCL (no fp16)
- [ ] add backend: CANN (will be done via #24462)


### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake

```
force_builders=Linux OpenCL,Win64 OpenCL,Custom
buildworker:Custom=linux-4
build_image:Custom=ubuntu:18.04
modules_filter:Custom=none
disable_ipp:Custom=ON
```
fengyuentau committed Nov 7, 2023
1 parent 832f738 commit ee0822d
Showing 10 changed files with 454 additions and 43 deletions.
7 changes: 7 additions & 0 deletions modules/dnn/include/opencv2/dnn/all_layers.hpp
@@ -1166,6 +1166,13 @@ CV__DNN_INLINE_NS_BEGIN
    static Ptr<ExpandLayer> create(const LayerParams &params);
};

class CV_EXPORTS InstanceNormLayer : public Layer {
public:
    float epsilon;

    static Ptr<InstanceNormLayer> create(const LayerParams &params);
};

//! @}
//! @}
CV__DNN_INLINE_NS_END
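
Not part of the diff: a minimal sketch of constructing the new layer directly through its factory method, assuming the `epsilon` attribute is read from `LayerParams` (the type string `InstanceNormalization` is registered in `init.cpp` below):

```
#include <opencv2/dnn/all_layers.hpp>

void makeInstanceNorm()
{
    cv::dnn::LayerParams lp;
    lp.name = "in_norm";
    lp.type = "InstanceNormalization"; // factory name registered in init.cpp
    lp.set("epsilon", 1e-5f);          // assumption: matches the ONNX attribute default
    cv::Ptr<cv::dnn::InstanceNormLayer> layer = cv::dnn::InstanceNormLayer::create(lp);
    (void)layer; // the layer would normally be added to a Net, as in the perf test below
}
```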
57 changes: 57 additions & 0 deletions modules/dnn/perf/perf_layer.cpp
@@ -683,6 +683,62 @@ PERF_TEST_P_(Layer_GatherElements, GatherElements)
test_layer({2700, 1, 2914}, {2700, 1, 81}, 2);
}

struct Layer_InstanceNorm : public TestBaseWithParam<tuple<Backend, Target> >
{
    void test_layer(const std::vector<int>& x_shape)
    {
        int backendId = get<0>(GetParam());
        int targetId = get<1>(GetParam());

        Mat x(x_shape, CV_32FC1);
        Mat scale(x_shape[1], 1, CV_32FC1);
        Mat b(x_shape[1], 1, CV_32FC1);

        randu(x, 0.f, 1.f);
        randu(scale, 0.f, 1.f);
        randu(b, 0.f, 1.f);

        Net net;
        LayerParams lp;
        lp.type = "InstanceNormalization";
        lp.name = "testLayer";
        int id = net.addLayerToPrev(lp.name, lp.type, lp);
        net.connect(0, 0, id, 0);
        net.connect(0, 1, id, 1);
        net.connect(0, 2, id, 2);

        // warmup
        {
            std::vector<String> inpNames{"x", "scale", "b"};
            net.setInputsNames(inpNames);
            net.setInput(x, inpNames[0]);
            net.setInput(scale, inpNames[1]);
            net.setInput(b, inpNames[2]);

            net.setPreferableBackend(backendId);
            net.setPreferableTarget(targetId);
            Mat out = net.forward();
        }

        TEST_CYCLE()
        {
            Mat res = net.forward();
        }

        SANITY_CHECK_NOTHING();
    }

    int N = 2;
    int C = 64;
    int H = 180;
    int W = 240;
};

PERF_TEST_P_(Layer_InstanceNorm, InstanceNorm)
{
    test_layer({N, C, H, W});
}

INSTANTIATE_TEST_CASE_P(/**/, Layer_Slice, dnnBackendsAndTargets(false, false));
INSTANTIATE_TEST_CASE_P(/**/, Layer_NaryEltwise, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
#ifdef HAVE_CUDA
@@ -693,6 +749,7 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_ScatterND, testing::Values(std::make_tuple(D
INSTANTIATE_TEST_CASE_P(/**/, Layer_LayerNorm, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
INSTANTIATE_TEST_CASE_P(/**/, Layer_LayerNormExpanded, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
INSTANTIATE_TEST_CASE_P(/**/, Layer_GatherElements, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));
INSTANTIATE_TEST_CASE_P(/**/, Layer_InstanceNorm, testing::Values(std::make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU)));


typedef TestBaseWithParam<tuple<Vec4i, int, bool, tuple<Backend, Target> > > Layer_FullyConnected;
28 changes: 28 additions & 0 deletions modules/dnn/src/cuda/mvn.cu
@@ -66,6 +66,17 @@ namespace raw {
            output[idx] = (static_cast<float>(input[idx]) - means[outer_idx]) * scale[outer_idx];
        }
    }

    // Per-channel affine normalization: y = (x - mean[n, c]) * (scale[c] * stdev[n, c]) + bias[c]
    template <class T>
    __global__ void normalize_mean_variance_channelwise(Span<T> output, View<T> input, View<T> scale, View<T> bias, View<float> means, View<float> stdev, size_type inner_size, size_type C) {
        for (auto idx : grid_stride_range(output.size())) {
            const index_type outer_idx = idx / inner_size;
            const index_type c = outer_idx % C;
            auto s = static_cast<float>(scale[c]) * stdev[outer_idx];
            auto b = static_cast<float>(bias[c]);
            output[idx] = (static_cast<float>(input[idx]) - means[outer_idx]) * s + b;
        }
    }
}

template <class T>
@@ -142,4 +153,21 @@ template void normalize_mean_variance(const Stream&, Span<__half>, View<__half>,
#endif
template void normalize_mean_variance(const Stream&, Span<float>, View<float>, View<float>, View<float>, std::size_t);

template <class T>
void normalize_mean_variance_channelwise(const Stream& stream, Span<T> output, View<T> input, View<T> scale, View<T> bias, View<float> means, View<float> stdev, std::size_t inner_size, std::size_t C)
{
    CV_Assert(input.size() == output.size());
    CV_Assert(input.size() / inner_size == means.size());
    CV_Assert(means.size() == stdev.size());

    auto kernel = raw::normalize_mean_variance_channelwise<T>;
    auto policy = make_policy(kernel, output.size(), 0, stream);
    launch_kernel(kernel, policy, output, input, scale, bias, means, stdev, inner_size, C);
}

#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void normalize_mean_variance_channelwise(const Stream&, Span<__half> /*output*/, View<__half> /*input*/, View<__half> /*scale*/, View<__half> /*bias*/, View<float> /*means*/, View<float> /*stdev*/, std::size_t, std::size_t);
#endif
template void normalize_mean_variance_channelwise(const Stream&, Span<float> /*output*/, View<float> /*input*/, View<float> /*scale*/, View<float> /*bias*/, View<float> /*means*/, View<float> /*stdev*/, std::size_t, std::size_t);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */
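
Reading the new kernel together with its call site in `instance_norm.hpp` below: `means` holds the per-`(n, c)` mean, and `stdev` is assumed to already contain the fused normalization factor produced by `compute_normalization_scale` (roughly `1 / sqrt(var + epsilon)`), so each element is transformed as

```
output[i] = (input[i] - means[n, c]) * (scale[c] * stdev[n, c]) + bias[c]
```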
3 changes: 3 additions & 0 deletions modules/dnn/src/cuda4dnn/kernels/mvn.hpp
@@ -26,6 +26,9 @@ void normalize_mean(const csl::Stream& stream, csl::Span<T> output, csl::View<T>
template <class T>
void normalize_mean_variance(const csl::Stream& stream, csl::Span<T> output, csl::View<T> input, csl::View<float> means, csl::View<float> scale, std::size_t inner_size);

template <class T>
void normalize_mean_variance_channelwise(const csl::Stream &stream, csl::Span<T> output, csl::View<T> input, csl::View<T> scale, csl::View<T> bias, csl::View<float> means, csl::View<float> stdev, std::size_t inner_size, std::size_t C);

}}}} /* namespace cv::dnn::cuda4dnn::kernels */

#endif /* OPENCV_DNN_SRC_CUDA4DNN_KERNELS_MVN_HPP */
86 changes: 86 additions & 0 deletions modules/dnn/src/cuda4dnn/primitives/instance_norm.hpp
@@ -0,0 +1,86 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_SRC_CUDA4DNN_PRIMITIVES_INSTANCE_NORM_HPP
#define OPENCV_DNN_SRC_CUDA4DNN_PRIMITIVES_INSTANCE_NORM_HPP

#include "../../op_cuda.hpp"

#include "../csl/stream.hpp"
#include "../csl/span.hpp"
#include "../csl/tensor.hpp"
#include "../csl/workspace.hpp"

#include "../kernels/fill_copy.hpp"
#include "../kernels/mvn.hpp"

#include <opencv2/core.hpp>

#include <cstddef>
#include <vector>
#include <utility>

namespace cv { namespace dnn { namespace cuda4dnn {

template <class T>
class InstanceNormOp final : public CUDABackendNode {
public:
    using wrapper_type = GetCUDABackendWrapperType<T>;

    InstanceNormOp(csl::Stream stream_, float epsilon_, size_t loops)
        : stream(std::move(stream_)), epsilon(epsilon_) {
        // Reserve two float buffers of `loops` (= N * C) elements: per-slice mean and normalization scale.
        csl::WorkspaceBuilder builder;
        builder.require<float>(loops);
        builder.require<float>(loops);
        scratch_mem_in_bytes = builder.required_workspace_size();
    }

    void forward(const std::vector<cv::Ptr<BackendWrapper>>& inputs,
                 const std::vector<cv::Ptr<BackendWrapper>>& outputs,
                 csl::Workspace& workspace) override {
        auto input_wrapper = inputs[0].dynamicCast<wrapper_type>();
        auto scale_wrapper = inputs[1].dynamicCast<wrapper_type>();
        auto bias_wrapper = inputs[2].dynamicCast<wrapper_type>();

        auto input = input_wrapper->getView();
        auto scale = scale_wrapper->getView();
        auto bias = bias_wrapper->getView();

        auto output_wrapper = outputs[0].dynamicCast<wrapper_type>();
        auto output = output_wrapper->getSpan();

        auto C = input.get_axis_size(1);
        auto loops = input.size_range(0, 2);                  // number of (n, c) slices
        auto norm_size = input.size_range(2, input.rank());   // elements per slice
        if (norm_size == 1) {
            // Nothing to normalize over: every element equals its own mean.
            kernels::fill<T>(stream, output, 0.f);
            return;
        } else {
            auto ws_allocator = csl::WorkspaceAllocator(workspace);

            auto mean = ws_allocator.get_span<float>(loops);
            kernels::fill<float>(stream, mean, 0.f);

            auto stdev = ws_allocator.get_span<float>(loops);
            kernels::fill<float>(stream, stdev, 0.f);

            kernels::reduce_mean_sqr_sum<T>(stream, mean, stdev, input, norm_size);
            kernels::compute_normalization_scale(stream, stdev, mean, stdev, norm_size, epsilon);
            kernels::normalize_mean_variance_channelwise<T>(stream, output, input, scale, bias, mean, stdev, norm_size, C);
        }
    }

    std::size_t get_workspace_memory_in_bytes() const noexcept override { return scratch_mem_in_bytes; }

private:
    csl::Stream stream;

    float epsilon;

    std::size_t scratch_mem_in_bytes;
};

}}} // cv::dnn::cuda4dnn

#endif // OPENCV_DNN_SRC_CUDA4DNN_PRIMITIVES_INSTANCE_NORM_HPP
1 change: 1 addition & 0 deletions modules/dnn/src/init.cpp
@@ -160,6 +160,7 @@ void initializeLayerFactory()
    CV_DNN_REGISTER_LAYER_CLASS(GatherElements, GatherElementsLayer);
    CV_DNN_REGISTER_LAYER_CLASS(LayerNormalization, LayerNormLayer);
    CV_DNN_REGISTER_LAYER_CLASS(Expand, ExpandLayer);
    CV_DNN_REGISTER_LAYER_CLASS(InstanceNormalization, InstanceNormLayer);

    CV_DNN_REGISTER_LAYER_CLASS(Crop, CropLayer);
    CV_DNN_REGISTER_LAYER_CLASS(Eltwise, EltwiseLayer);
7 changes: 4 additions & 3 deletions modules/dnn/src/layers/cpu_kernels/fast_norm.cpp
@@ -118,10 +118,11 @@ void fastNorm(const Mat &input, const Mat &scale, const Mat &bias, Mat &output,

void fastNormChannel(const Mat &input, const Mat &scale, const Mat &bias, Mat &output, float epsilon) {
    const auto input_shape = shape(input);
    size_t N = input_shape[0], C = input_shape[1];
    CV_CheckEQ(scale.total(), bias.total(), "fastNormChannel: scale and bias should have the same shape");
    CV_CheckEQ(scale.total(), C, "fastNormChannel: scale should be a 1d tensor and match the channel of input");
    CV_CheckGE(input.dims, 3, "fastNormChannel: input dimension >= 3");

    size_t N = input_shape[0], C = input_shape[1];
    size_t loops = N * C,
           norm_size = static_cast<size_t>(total(input_shape, 2));
    float inv_norm_size = 1.0 / norm_size;
@@ -147,9 +148,9 @@ void fastNormChannel(const Mat &input, const Mat &scale, const Mat &bias, Mat &o
            float inv_stdev = 1.f / mean_square;

            size_t c = i % C;
            float s = scale_data[c], b = bias_data[c];
            float s = scale_data[c] * inv_stdev, b = bias_data[c];
            for (size_t j = 0; j < norm_size; j++) {
                y[j] = s * (x[j] - mean) * inv_stdev + b;
                y[j] = s * (x[j] - mean) + b;
            }
        }
    };
