From 8c8d8e29e453946c07a6704a2035748b8a912de6 Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 8 Mar 2023 21:12:04 +0800 Subject: [PATCH 01/18] support rtmpose ncnn --- .../single-stage_ncnn-fp16_static-320x320.py | 5 + .../single-stage_ncnn_static-320x320.py | 4 + ...etection_simcc_ncnn-fp16_static-256x192.py | 4 + .../backend_ops/ncnn/onnx2ncnn/onnx2ncnn.cpp | 2 - mmdeploy/codebase/mmpose/models/__init__.py | 1 + .../codebase/mmpose/models/utils/__init__.py | 5 + .../mmpose/models/utils/rtmpose_block.py | 91 +++++++++++++++++++ 7 files changed, 110 insertions(+), 2 deletions(-) create mode 100644 configs/mmdet/detection/single-stage_ncnn-fp16_static-320x320.py create mode 100644 configs/mmdet/detection/single-stage_ncnn_static-320x320.py create mode 100644 configs/mmpose/pose-detection_simcc_ncnn-fp16_static-256x192.py create mode 100644 mmdeploy/codebase/mmpose/models/utils/__init__.py create mode 100644 mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py diff --git a/configs/mmdet/detection/single-stage_ncnn-fp16_static-320x320.py b/configs/mmdet/detection/single-stage_ncnn-fp16_static-320x320.py new file mode 100644 index 0000000000..a444d06056 --- /dev/null +++ b/configs/mmdet/detection/single-stage_ncnn-fp16_static-320x320.py @@ -0,0 +1,5 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/ncnn.py'] + +backend_config = dict(precision='FP16') +codebase_config = dict(model_type='ncnn_end2end') +onnx_config = dict(output_names=['detection_output'], input_shape=[320, 320]) diff --git a/configs/mmdet/detection/single-stage_ncnn_static-320x320.py b/configs/mmdet/detection/single-stage_ncnn_static-320x320.py new file mode 100644 index 0000000000..38cfd0226a --- /dev/null +++ b/configs/mmdet/detection/single-stage_ncnn_static-320x320.py @@ -0,0 +1,4 @@ +_base_ = ['../_base_/base_static.py', '../../_base_/backends/ncnn.py'] + +codebase_config = dict(model_type='ncnn_end2end') +onnx_config = dict(output_names=['detection_output'], 
input_shape=[320, 320]) diff --git a/configs/mmpose/pose-detection_simcc_ncnn-fp16_static-256x192.py b/configs/mmpose/pose-detection_simcc_ncnn-fp16_static-256x192.py new file mode 100644 index 0000000000..e7d3afe588 --- /dev/null +++ b/configs/mmpose/pose-detection_simcc_ncnn-fp16_static-256x192.py @@ -0,0 +1,4 @@ +_base_ = ['./pose-detection_static.py', '../_base_/backends/ncnn.py'] + +backend_config = dict(precision='FP16') +onnx_config = dict(input_shape=[192, 256], output_names=['simcc_x', 'simcc_y']) diff --git a/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/onnx2ncnn.cpp b/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/onnx2ncnn.cpp index 595bb8bf6b..ca8cd628ad 100644 --- a/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/onnx2ncnn.cpp +++ b/csrc/mmdeploy/backend_ops/ncnn/onnx2ncnn/onnx2ncnn.cpp @@ -2200,8 +2200,6 @@ int main(int argc, char** argv) { } fprintf(pp, " 4=%d", keepdims); fprintf(pp, " 5=1"); - // Force set Reduction for FP32, FP16 may exceed for some models. - fprintf(pp, " 31=15"); } else if (op == "Reorg") { int stride = get_node_attr_i(node, "stride", 1); fprintf(pp, " 0=%d", stride); diff --git a/mmdeploy/codebase/mmpose/models/__init__.py b/mmdeploy/codebase/mmpose/models/__init__.py index 304096f9ed..9c779c0716 100644 --- a/mmdeploy/codebase/mmpose/models/__init__.py +++ b/mmdeploy/codebase/mmpose/models/__init__.py @@ -2,3 +2,4 @@ from . import heads # noqa: F401,F403 from . import pose_estimators # noqa: F401,F403 +from . import utils # noqa: F401,F403 diff --git a/mmdeploy/codebase/mmpose/models/utils/__init__.py b/mmdeploy/codebase/mmpose/models/utils/__init__.py new file mode 100644 index 0000000000..966140579e --- /dev/null +++ b/mmdeploy/codebase/mmpose/models/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from . 
import rtmpose_block + +__all__ = ['rtmpose_block'] diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py new file mode 100644 index 0000000000..c4ef347c2f --- /dev/null +++ b/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch + +from mmdeploy.core import FUNCTION_REWRITER +import torch.nn.functional as F + +from mmpose.models.utils import rope + + +@FUNCTION_REWRITER.register_rewriter( + 'mmpose.models.utils.rtmpose_block.ScaleNorm.forward', backend='ncnn') +def scalenorm__forward__ncnn(self, x): + """Rewrite `scalenorm` for ncnn backend. + ncnn does not support negative dimension for torch.chunk and torch.cat + ncnn pad shape does not support float input + """ + # The one-dim of Fubinious norm is equal to L2Norm. + # Set p=2 explicitly to map torch.norm to ReduceL2 onnx op, + # which will avoid FP16 exceed. + norm = torch.norm(x, p=2, dim=2, keepdim=True) + norm = norm * self.scale + # Rewrite for ncnn binaryop broadcast. + norm = norm.clamp(min=self.eps) + return (x.unsqueeze(2) / norm.unsqueeze(2)).squeeze(2) * self.g + + +@FUNCTION_REWRITER.register_rewriter( + 'mmpose.models.utils.rtmpose_block.RTMBlock._forward', backend='ncnn') +def rtmblock___forward_ncnn(self, inputs): + """Rewrite `_forward` of RTMBlock for ncnn backend. + ncnn does not support negative dimension for Split op. 
+ """ + if self.attn_type == 'self-attn': + x = inputs + else: + x, k, v = inputs + + x = self.ln(x) + uv = self.uv(x) + if self.attn_type == 'self-attn': + uv = self.act_fn(uv) + u = uv[..., :self.e] + v = uv[..., 512:1024] + base = uv[..., 2 * self.e:2 * self.e + self.s] + + q = (base.unsqueeze(1) * self.gamma[None, None, 0:1, :] + + self.beta[None, None, 0:1, :]).squeeze(1) + k = (base.unsqueeze(1) * self.gamma[None, None, 1:2, :] + + self.beta[None, None, 1:2, :]).squeeze(1) + + if self.pos_enc: + q = rope(q, dim=1) + k = rope(k, dim=1) + else: + u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=uv.dim() - 1) + + k = self.k_fc(k) + v = self.v_fc(v) + + if self.pos_enc: + q = rope(q, 1) + k = rope(k, 1) + qk = torch.bmm(q, k.permute(0, 2, 1)) + if self.use_rel_bias: + if self.attn_type == 'self-attn': + bias = self.rel_pos_bias(q.size(1)) + else: + bias = self.rel_pos_bias(q.size(1), k.size(1)) + qk += bias[:, :q.size(1), :k.size(1)] + + kernel = torch.square(F.relu(qk / self.sqrt_s)) + if self.dropout_rate > 0.: + kernel = self.dropout(kernel) + + x = u * torch.bmm(kernel, v) + x = self.o(x) + + return x + + +@FUNCTION_REWRITER.register_rewriter( + 'mmpose.models.utils.rtmpose_block.Scale.forward', backend='ncnn') +def scale__forward_ncnn(self, x): + """Rewrite `forward` of Scale for ncnn backend. + Adapt the shape to avoid ncnn BinaryOp seg fault. 
+ """ + + x = x.unsqueeze(1) + scale = self.scale[None, None, None, :] + return (x * scale).squeeze(1) From 5f7a21dfbd205f817cf58799e610118c077c69b0 Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 8 Mar 2023 21:13:26 +0800 Subject: [PATCH 02/18] fix docformatter --- .../codebase/mmpose/models/utils/rtmpose_block.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py index c4ef347c2f..3c3afbc0a4 100644 --- a/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py +++ b/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py @@ -1,19 +1,19 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch - -from mmdeploy.core import FUNCTION_REWRITER import torch.nn.functional as F - from mmpose.models.utils import rope +from mmdeploy.core import FUNCTION_REWRITER + @FUNCTION_REWRITER.register_rewriter( 'mmpose.models.utils.rtmpose_block.ScaleNorm.forward', backend='ncnn') def scalenorm__forward__ncnn(self, x): """Rewrite `scalenorm` for ncnn backend. - ncnn does not support negative dimension for torch.chunk and torch.cat - ncnn pad shape does not support float input + + ncnn does not support negative dimension for torch.chunk and + torch.cat ncnn pad shape does not support float input """ # The one-dim of Fubinious norm is equal to L2Norm. # Set p=2 explicitly to map torch.norm to ReduceL2 onnx op, @@ -29,6 +29,7 @@ def scalenorm__forward__ncnn(self, x): 'mmpose.models.utils.rtmpose_block.RTMBlock._forward', backend='ncnn') def rtmblock___forward_ncnn(self, inputs): """Rewrite `_forward` of RTMBlock for ncnn backend. + ncnn does not support negative dimension for Split op. """ if self.attn_type == 'self-attn': @@ -83,6 +84,7 @@ def rtmblock___forward_ncnn(self, inputs): 'mmpose.models.utils.rtmpose_block.Scale.forward', backend='ncnn') def scale__forward_ncnn(self, x): """Rewrite `forward` of Scale for ncnn backend. 
+ Adapt the shape to avoid ncnn BinaryOp seg fault. """ From 382414efae8120e5140a497f8d33c630fb1c0595 Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Thu, 9 Mar 2023 11:00:03 +0800 Subject: [PATCH 03/18] fix docformatter --- mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py index 3c3afbc0a4..bf421da61d 100644 --- a/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py +++ b/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py @@ -12,8 +12,8 @@ def scalenorm__forward__ncnn(self, x): """Rewrite `scalenorm` for ncnn backend. - ncnn does not support negative dimension for torch.chunk and - torch.cat ncnn pad shape does not support float input + ncnn does not support negative dimension for torch.chunk and torch.cat ncnn + pad shape does not support float input """ # The one-dim of Fubinious norm is equal to L2Norm. # Set p=2 explicitly to map torch.norm to ReduceL2 onnx op, From 9b20995e5cead08c01eeb53a54532d1b3c91843c Mon Sep 17 00:00:00 2001 From: root Date: Mon, 13 Mar 2023 06:26:14 +0000 Subject: [PATCH 04/18] fix classname from tauj to dev-1.x branch --- .../codebase/mmpose/models/utils/__init__.py | 4 +- .../mmpose/models/utils/rtmcc_block.py | 93 +++++++++++++++++++ 2 files changed, 95 insertions(+), 2 deletions(-) create mode 100644 mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py diff --git a/mmdeploy/codebase/mmpose/models/utils/__init__.py b/mmdeploy/codebase/mmpose/models/utils/__init__.py index 966140579e..7d65ebdb8c 100644 --- a/mmdeploy/codebase/mmpose/models/utils/__init__.py +++ b/mmdeploy/codebase/mmpose/models/utils/__init__.py @@ -1,5 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. -from . import rtmpose_block +from . 
import rtmcc_block -__all__ = ['rtmpose_block'] +__all__ = ['rtmcc_block'] diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py new file mode 100644 index 0000000000..7e3fbddd51 --- /dev/null +++ b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import torch +import torch.nn.functional as F +from mmpose.models.utils import rope + +from mmdeploy.core import FUNCTION_REWRITER + + +@FUNCTION_REWRITER.register_rewriter( + 'mmpose.models.utils.rtmcc_block.ScaleNorm.forward', backend='ncnn') +def scalenorm__forward__ncnn(self, x): + """Rewrite `scalenorm` for ncnn backend. + + ncnn does not support negative dimension for torch.chunk and torch.cat ncnn + pad shape does not support float input + """ + # The one-dim of Fubinious norm is equal to L2Norm. + # Set p=2 explicitly to map torch.norm to ReduceL2 onnx op, + # which will avoid FP16 exceed. + norm = torch.norm(x, p=2, dim=2, keepdim=True) + norm = norm * self.scale + # Rewrite for ncnn binaryop broadcast. + norm = norm.clamp(min=self.eps) + return (x.unsqueeze(2) / norm.unsqueeze(2)).squeeze(2) * self.g + + +@FUNCTION_REWRITER.register_rewriter( + 'mmpose.models.utils.rtmcc_block.RTMCCBlock._forward', backend='ncnn') +def rtmccblock___forward_ncnn(self, inputs): + """Rewrite `_forward` of RTMBlock for ncnn backend. + + ncnn does not support negative dimension for Split op. 
+ """ + if self.attn_type == 'self-attn': + x = inputs + else: + x, k, v = inputs + + x = self.ln(x) + uv = self.uv(x) + if self.attn_type == 'self-attn': + uv = self.act_fn(uv) + u = uv[..., :self.e] + v = uv[..., 512:1024] + base = uv[..., 2 * self.e:2 * self.e + self.s] + + q = (base.unsqueeze(1) * self.gamma[None, None, 0:1, :] + + self.beta[None, None, 0:1, :]).squeeze(1) + k = (base.unsqueeze(1) * self.gamma[None, None, 1:2, :] + + self.beta[None, None, 1:2, :]).squeeze(1) + + if self.pos_enc: + q = rope(q, dim=1) + k = rope(k, dim=1) + else: + u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=uv.dim() - 1) + + k = self.k_fc(k) + v = self.v_fc(v) + + if self.pos_enc: + q = rope(q, 1) + k = rope(k, 1) + qk = torch.bmm(q, k.permute(0, 2, 1)) + if self.use_rel_bias: + if self.attn_type == 'self-attn': + bias = self.rel_pos_bias(q.size(1)) + else: + bias = self.rel_pos_bias(q.size(1), k.size(1)) + qk += bias[:, :q.size(1), :k.size(1)] + + kernel = torch.square(F.relu(qk / self.sqrt_s)) + if self.dropout_rate > 0.: + kernel = self.dropout(kernel) + + x = u * torch.bmm(kernel, v) + x = self.o(x) + + return x + + +@FUNCTION_REWRITER.register_rewriter( + 'mmpose.models.utils.rtmcc_block.Scale.forward', backend='ncnn') +def scale__forward_ncnn(self, x): + """Rewrite `forward` of Scale for ncnn backend. + + Adapt the shape to avoid ncnn BinaryOp seg fault. 
+ """ + + x = x.unsqueeze(1) + scale = self.scale[None, None, None, :] + return (x * scale).squeeze(1) From 627cc30337593fb54d8f436c70599f96e3dc6bf1 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 13 Mar 2023 06:31:34 +0000 Subject: [PATCH 05/18] rename file --- .../mmpose/models/utils/rtmpose_block.py | 93 ------------------- 1 file changed, 93 deletions(-) delete mode 100644 mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py deleted file mode 100644 index bf421da61d..0000000000 --- a/mmdeploy/codebase/mmpose/models/utils/rtmpose_block.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. - -import torch -import torch.nn.functional as F -from mmpose.models.utils import rope - -from mmdeploy.core import FUNCTION_REWRITER - - -@FUNCTION_REWRITER.register_rewriter( - 'mmpose.models.utils.rtmpose_block.ScaleNorm.forward', backend='ncnn') -def scalenorm__forward__ncnn(self, x): - """Rewrite `scalenorm` for ncnn backend. - - ncnn does not support negative dimension for torch.chunk and torch.cat ncnn - pad shape does not support float input - """ - # The one-dim of Fubinious norm is equal to L2Norm. - # Set p=2 explicitly to map torch.norm to ReduceL2 onnx op, - # which will avoid FP16 exceed. - norm = torch.norm(x, p=2, dim=2, keepdim=True) - norm = norm * self.scale - # Rewrite for ncnn binaryop broadcast. - norm = norm.clamp(min=self.eps) - return (x.unsqueeze(2) / norm.unsqueeze(2)).squeeze(2) * self.g - - -@FUNCTION_REWRITER.register_rewriter( - 'mmpose.models.utils.rtmpose_block.RTMBlock._forward', backend='ncnn') -def rtmblock___forward_ncnn(self, inputs): - """Rewrite `_forward` of RTMBlock for ncnn backend. - - ncnn does not support negative dimension for Split op. 
- """ - if self.attn_type == 'self-attn': - x = inputs - else: - x, k, v = inputs - - x = self.ln(x) - uv = self.uv(x) - if self.attn_type == 'self-attn': - uv = self.act_fn(uv) - u = uv[..., :self.e] - v = uv[..., 512:1024] - base = uv[..., 2 * self.e:2 * self.e + self.s] - - q = (base.unsqueeze(1) * self.gamma[None, None, 0:1, :] + - self.beta[None, None, 0:1, :]).squeeze(1) - k = (base.unsqueeze(1) * self.gamma[None, None, 1:2, :] + - self.beta[None, None, 1:2, :]).squeeze(1) - - if self.pos_enc: - q = rope(q, dim=1) - k = rope(k, dim=1) - else: - u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=uv.dim() - 1) - - k = self.k_fc(k) - v = self.v_fc(v) - - if self.pos_enc: - q = rope(q, 1) - k = rope(k, 1) - qk = torch.bmm(q, k.permute(0, 2, 1)) - if self.use_rel_bias: - if self.attn_type == 'self-attn': - bias = self.rel_pos_bias(q.size(1)) - else: - bias = self.rel_pos_bias(q.size(1), k.size(1)) - qk += bias[:, :q.size(1), :k.size(1)] - - kernel = torch.square(F.relu(qk / self.sqrt_s)) - if self.dropout_rate > 0.: - kernel = self.dropout(kernel) - - x = u * torch.bmm(kernel, v) - x = self.o(x) - - return x - - -@FUNCTION_REWRITER.register_rewriter( - 'mmpose.models.utils.rtmpose_block.Scale.forward', backend='ncnn') -def scale__forward_ncnn(self, x): - """Rewrite `forward` of Scale for ncnn backend. - - Adapt the shape to avoid ncnn BinaryOp seg fault. 
- """ - - x = x.unsqueeze(1) - scale = self.scale[None, None, None, :] - return (x * scale).squeeze(1) From 665b764aee2d8cf6869a7fcd12a78f8a90c34877 Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 15 Mar 2023 17:07:06 +0800 Subject: [PATCH 06/18] fix comments --- mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py index 7e3fbddd51..c3698d299d 100644 --- a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py +++ b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py @@ -12,8 +12,7 @@ def scalenorm__forward__ncnn(self, x): """Rewrite `scalenorm` for ncnn backend. - ncnn does not support negative dimension for torch.chunk and torch.cat ncnn - pad shape does not support float input + Rewrite torch.norm to avoid FP16 exceed in ncnn Android platform. """ # The one-dim of Fubinious norm is equal to L2Norm. 
# Set p=2 explicitly to map torch.norm to ReduceL2 onnx op, @@ -42,7 +41,7 @@ def rtmccblock___forward_ncnn(self, inputs): if self.attn_type == 'self-attn': uv = self.act_fn(uv) u = uv[..., :self.e] - v = uv[..., 512:1024] + v = uv[..., self.e:2 * self.e] base = uv[..., 2 * self.e:2 * self.e + self.s] q = (base.unsqueeze(1) * self.gamma[None, None, 0:1, :] + From 9fedfb351bd0a02e9ee485ac5074f8cecd8f6b49 Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 15 Mar 2023 17:39:32 +0800 Subject: [PATCH 07/18] remove unused rewriter --- mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py index c3698d299d..e3f9df8a15 100644 --- a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py +++ b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py @@ -29,7 +29,7 @@ def scalenorm__forward__ncnn(self, x): def rtmccblock___forward_ncnn(self, inputs): """Rewrite `_forward` of RTMBlock for ncnn backend. - ncnn does not support negative dimension for Split op. + Rewrite the matmul and avoid unbind for ncnn backend. 
""" if self.attn_type == 'self-attn': x = inputs @@ -53,7 +53,7 @@ def rtmccblock___forward_ncnn(self, inputs): q = rope(q, dim=1) k = rope(k, dim=1) else: - u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=uv.dim() - 1) + u, q = torch.split(self.act_fn(uv), [self.e, self.s], dim=-1) k = self.k_fc(k) v = self.v_fc(v) From 76d53a0807b669ef9fcd4577f6a7491b55b77d4a Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 15 Mar 2023 18:10:29 +0800 Subject: [PATCH 08/18] fix norm --- .../mmpose/models/utils/rtmcc_block.py | 4 +-- mmdeploy/pytorch/functions/normalize.py | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py index e3f9df8a15..f73e72bec2 100644 --- a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py +++ b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py @@ -12,12 +12,12 @@ def scalenorm__forward__ncnn(self, x): """Rewrite `scalenorm` for ncnn backend. - Rewrite torch.norm to avoid FP16 exceed in ncnn Android platform. + Rewrite scalenorm to avoid FP16 exceed in ncnn Android platform. """ # The one-dim of Fubinious norm is equal to L2Norm. # Set p=2 explicitly to map torch.norm to ReduceL2 onnx op, # which will avoid FP16 exceed. - norm = torch.norm(x, p=2, dim=2, keepdim=True) + norm = torch.norm(x, dim=2, keepdim=True) norm = norm * self.scale # Rewrite for ncnn binaryop broadcast. 
norm = norm.clamp(min=self.eps) diff --git a/mmdeploy/pytorch/functions/normalize.py b/mmdeploy/pytorch/functions/normalize.py index b0ae4ccfe2..d526a65922 100644 --- a/mmdeploy/pytorch/functions/normalize.py +++ b/mmdeploy/pytorch/functions/normalize.py @@ -2,6 +2,8 @@ import torch +from typing import Optional, Sequence, Union + from mmdeploy.core import FUNCTION_REWRITER @@ -39,3 +41,26 @@ def normalize__ncnn(input: torch.Tensor, input.transpose(1, dim), p=p, dim=1, eps=eps).transpose(1, dim) return output + + +@FUNCTION_REWRITER.register_rewriter(func_name='torch.norm', backend='ncnn') +def norm__ncnn(input: torch.Tensor, + p: Optional[Union[int, str]] = 'fro', + dim: Optional[Union[int, Sequence]] = None, + keepdim: Optional[bool] = False, + out: Optional[torch.Tensor] = None, + dtype: Optional[torch.dtype] = None): + """Rewrite `torch.norm` for ncnn backend. + + Rewrite torch.norm for p=='fro' case to avoid FP16 exceed + in ncnn Android platform. + """ + ctx = FUNCTION_REWRITER.get_context() + origin_func = ctx.origin_func + if p == 'fro' and (isinstance(dim, int) or len(dim) == 1): + # Substitute fro with L2 norm. + return torch.norm( + input, p=2, dim=dim, keepdim=keepdim, out=out, dtype=dtype) + else: + return origin_func( + input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype) From 45e59dd858cfdea3ea45470ab689314ea63738f8 Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 15 Mar 2023 19:30:19 +0800 Subject: [PATCH 09/18] fix lint --- mmdeploy/pytorch/functions/normalize.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmdeploy/pytorch/functions/normalize.py b/mmdeploy/pytorch/functions/normalize.py index d526a65922..6b1e11075e 100644 --- a/mmdeploy/pytorch/functions/normalize.py +++ b/mmdeploy/pytorch/functions/normalize.py @@ -52,8 +52,8 @@ def norm__ncnn(input: torch.Tensor, dtype: Optional[torch.dtype] = None): """Rewrite `torch.norm` for ncnn backend. 
- Rewrite torch.norm for p=='fro' case to avoid FP16 exceed - in ncnn Android platform. + Rewrite torch.norm when p is Frobenius norm to avoid FP16 exceed in ncnn + Android platform. """ ctx = FUNCTION_REWRITER.get_context() origin_func = ctx.origin_func From 31714c9c9c127c5ff7be49ed9834144b6d2cd1af Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 15 Mar 2023 19:32:12 +0800 Subject: [PATCH 10/18] fix rtmcc_block --- mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py index f73e72bec2..64dc092728 100644 --- a/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py +++ b/mmdeploy/codebase/mmpose/models/utils/rtmcc_block.py @@ -86,7 +86,6 @@ def scale__forward_ncnn(self, x): Adapt the shape to avoid ncnn BinaryOp seg fault. """ - x = x.unsqueeze(1) scale = self.scale[None, None, None, :] return (x * scale).squeeze(1) From 7f7e190d5793106b1788911d12446236d7a9072a Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Wed, 15 Mar 2023 20:09:51 +0800 Subject: [PATCH 11/18] fix norm --- mmdeploy/pytorch/functions/normalize.py | 2 +- tests/test_pytorch/test_pytorch_functions.py | 22 ++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/mmdeploy/pytorch/functions/normalize.py b/mmdeploy/pytorch/functions/normalize.py index 6b1e11075e..859ca21396 100644 --- a/mmdeploy/pytorch/functions/normalize.py +++ b/mmdeploy/pytorch/functions/normalize.py @@ -58,7 +58,7 @@ def norm__ncnn(input: torch.Tensor, ctx = FUNCTION_REWRITER.get_context() origin_func = ctx.origin_func if p == 'fro' and (isinstance(dim, int) or len(dim) == 1): - # Substitute fro with L2 norm. + # Substitute Frobenius norm with L2 norm. 
return torch.norm( input, p=2, dim=dim, keepdim=keepdim, out=out, dtype=dtype) else: diff --git a/tests/test_pytorch/test_pytorch_functions.py b/tests/test_pytorch/test_pytorch_functions.py index 245bfba9d4..b137fc3fdd 100644 --- a/tests/test_pytorch/test_pytorch_functions.py +++ b/tests/test_pytorch/test_pytorch_functions.py @@ -163,6 +163,28 @@ def linear_caller(*arg, **kwargs): assert np.allclose(model_output, rewrite_output[0], rtol=1e-03, atol=1e-05) +@backend_checker(Backend.NCNN) +def test_norm_ncnn(): + import onnx + + import mmdeploy.apis.ncnn as ncnn_apis + from mmdeploy.utils.test import get_onnx_model + + input = torch.rand(1, 17, 24) + wrapped_func = WrapFunction(torch.norm, p='fro', dim=2, keepdim=True) + model_inputs = {'input': input} + ir_file_path = get_onnx_model(wrapped_func, model_inputs, deploy_cfg_ncnn) + assert osp.exists(ir_file_path) + onnx_model = onnx.load(ir_file_path) + nodes = onnx_model.graph.node + assert nodes[-1].name.startswith('ReduceL2') + ncnn_files_prefix = osp.splitext(ir_file_path)[0] + ncnn_apis.from_onnx(ir_file_path, ncnn_files_prefix) + param_path, bin_path = ncnn_apis.get_output_model_file(ir_file_path) + assert osp.exists(param_path) + assert osp.exists(bin_path) + + @backend_checker(Backend.TENSORRT) def test_repeat_static(): input = torch.rand([1]) From 84a39129ec0c9d88358a435b43f700ac4b3554b8 Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Thu, 16 Mar 2023 11:39:00 +0800 Subject: [PATCH 12/18] add ut --- .../test_mmpose/test_mmpose_models.py | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/tests/test_codebase/test_mmpose/test_mmpose_models.py b/tests/test_codebase/test_mmpose/test_mmpose_models.py index eb22d35193..3864c1fc76 100644 --- a/tests/test_codebase/test_mmpose/test_mmpose_models.py +++ b/tests/test_codebase/test_mmpose/test_mmpose_models.py @@ -6,6 +6,11 @@ from mmdeploy.utils import Backend, Codebase from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs 
# `assert_allclose` was deprecated in favor of `assert_close`; support both
# old and new torch versions.
try:
    from torch.testing import assert_close as torch_assert_close
except ImportError:
    from torch.testing import assert_allclose as torch_assert_close


def _assert_rewrite_matches(model, method_name, input_name, inputs,
                            deploy_cfg):
    """Run ``method_name`` of ``model`` with and without rewrites and assert
    the outputs match.

    Args:
        model (nn.Module): Frozen module under test.
        method_name (str): Name of the forward method to call.
        input_name (str): Keyword name of the input expected by the method.
        inputs (Tensor): Input tensor.
        deploy_cfg (Config): Deploy config that enables the rewrites.
    """
    wrapped_model = WrapModel(model, method_name)
    model_outputs = getattr(model, method_name)(inputs)
    rewrite_outputs, _ = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs={input_name: inputs},
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    torch_assert_close(rewrite_outputs, model_outputs)


def get_scale_norm_model():
    """Build a frozen mmpose ``ScaleNorm`` module for rewrite tests."""
    from mmpose.models.utils.rtmcc_block import ScaleNorm

    model = ScaleNorm(48)
    model.requires_grad_(False)
    return model


@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_scale_norm_forward(backend_type: Backend):
    """ScaleNorm rewrite must reproduce the eager forward output."""
    check_backend(backend_type, True)
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    _assert_rewrite_matches(get_scale_norm_model(), 'forward', 'x',
                            torch.rand(1, 17, 48), deploy_cfg)


def get_rtmcc_block_model():
    """Build a frozen mmpose ``RTMCCBlock`` module for rewrite tests."""
    from mmpose.models.utils.rtmcc_block import RTMCCBlock

    model = RTMCCBlock(48, 48, 48)
    model.requires_grad_(False)
    return model


@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_rtmcc_block_forward(backend_type: Backend):
    """RTMCCBlock rewrite must reproduce the eager ``_forward`` output."""
    check_backend(backend_type, True)
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    _assert_rewrite_matches(get_rtmcc_block_model(), '_forward', 'inputs',
                            torch.rand(1, 17, 48), deploy_cfg)


def get_scale_model():
    """Build a frozen mmpose ``Scale`` module for rewrite tests."""
    from mmpose.models.utils.rtmcc_block import Scale

    model = Scale(48)
    model.requires_grad_(False)
    return model


@pytest.mark.parametrize('backend_type', [Backend.NCNN])
def test_scale_forward(backend_type: Backend):
    """Scale rewrite must reproduce the eager forward output."""
    check_backend(backend_type, True)
    deploy_cfg = generate_mmpose_deploy_config(backend_type.value)
    _assert_rewrite_matches(get_scale_model(), 'forward', 'x',
                            torch.rand(1, 17, 48), deploy_cfg)
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdet.models.dense_heads.rtmdet_head.'
    'RTMDetHead.predict_by_feat',
    backend=Backend.NCNN.value)
def rtmdet_head__predict_by_feat__ncnn(
        self,
        cls_scores: List[Tensor],
        bbox_preds: List[Tensor],
        batch_img_metas: Optional[List[dict]] = None,
        cfg: Optional[ConfigDict] = None,
        rescale: bool = False,
        with_nms: bool = True):
    """Rewrite `predict_by_feat` of RTMDetHead for ncnn backend.

    1. Decode the priors to the box format expected by the ncnn
       DetectionOutput layer, which performs the post-processing.
    2. Batch dimension is not supported by ncnn, but supported by pytorch.
       Negative axis values in concatenations are rewritten as the
       corresponding positive values to avoid axis shift.
    3. 2-dimension tensor broadcast of `BinaryOps` operator is not supported
       by ncnn, so 2-dimension tensors are unsqueezed to 3 dimensions for
       correct `BinaryOps` calculation by ncnn.

    Args:
        cls_scores (list[Tensor]): Classification scores for all
            scale levels, each is a 4D-tensor, has shape
            (batch_size, num_priors * num_classes, H, W).
        bbox_preds (list[Tensor]): Box energies / deltas for all
            scale levels, each is a 4D-tensor, has shape
            (batch_size, num_priors * 4, H, W).
        batch_img_metas (list[dict], Optional): Batch image meta info.
            Defaults to None.
        cfg (ConfigDict, optional): Test / postprocessing
            configuration, if None, test_cfg would be used.
            Defaults to None.
        rescale (bool): If True, return boxes in original image space.
            Defaults to False. Unused here; kept for interface parity.
        with_nms (bool): If True, do nms before return boxes.
            Defaults to True. NMS is always applied by DetectionOutput.

    Returns:
        Tensor: Detection output of shape [N, num_det, 6].
    """
    ctx = FUNCTION_REWRITER.get_context()
    from mmdeploy.codebase.mmdet.ops import ncnn_detection_output_forward
    from mmdeploy.utils import get_root_logger
    from mmdeploy.utils.config_utils import is_dynamic_shape
    if is_dynamic_shape(ctx.cfg):
        logger = get_root_logger()
        logger.warning('RTMDet does not support dynamic shape with ncnn.')
    img_height = int(batch_img_metas[0]['img_shape'][0])
    img_width = int(batch_img_metas[0]['img_shape'][1])

    assert len(cls_scores) == len(bbox_preds)
    device = cls_scores[0].device
    cfg = self.test_cfg if cfg is None else cfg
    batch_size = bbox_preds[0].shape[0]
    featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
    # with_stride=True yields (x, y, stride_w, stride_h) per location.
    mlvl_priors = self.prior_generator.grid_priors(
        featmap_sizes, device=device, with_stride=True)
    mlvl_priors = [mlvl_prior.unsqueeze(0) for mlvl_prior in mlvl_priors]
    flatten_priors = torch.cat(mlvl_priors, dim=1)

    flatten_cls_scores = [
        cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
                                              self.cls_out_channels)
        for cls_score in cls_scores
    ]
    flatten_bbox_preds = [
        bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
        for bbox_pred in bbox_preds
    ]

    cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
    # DetectionOutput reserves class index 0 for the background, so prepend
    # a dummy zero-score background column.
    dummy_cls_scores = torch.zeros(
        batch_size, cls_scores.shape[-2], 1, device=cls_scores.device)
    batch_mlvl_scores = torch.cat([dummy_cls_scores, cls_scores], dim=2)

    flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
    assert flatten_priors.shape[-1] == 4, f'rtmdet needs (B, N, 4) priors, got\
 (B, N, {flatten_priors.shape[-1]})'

    # Decode distance predictions to corner boxes, normalized to [0, 1]
    # as DetectionOutput expects.
    tl_x = (flatten_priors[:, :, 0:1] -
            flatten_bbox_preds[:, :, 0:1]) / img_width
    tl_y = (flatten_priors[:, :, 1:2] -
            flatten_bbox_preds[:, :, 1:2]) / img_height
    br_x = (flatten_priors[:, :, 0:1] +
            flatten_bbox_preds[:, :, 2:3]) / img_width
    br_y = (flatten_priors[:, :, 1:2] +
            flatten_bbox_preds[:, :, 3:4]) / img_height
    prior_box_ncnn = torch.stack([tl_x, tl_y, br_x, br_y], -1)

    batch_mlvl_bboxes = flatten_bbox_preds.reshape(batch_size, 1, -1)
    batch_mlvl_scores = batch_mlvl_scores.reshape(batch_size, 1, -1)
    batch_mlvl_priors = prior_box_ncnn.reshape(batch_size, 1, -1)
    batch_mlvl_vars = torch.ones_like(batch_mlvl_priors)
    batch_mlvl_priors = torch.cat([batch_mlvl_priors, batch_mlvl_vars], dim=1)
    deploy_cfg = ctx.cfg
    post_params = get_post_processing_params(deploy_cfg)
    iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
    score_threshold = cfg.get('score_thr', post_params.score_threshold)
    pre_top_k = post_params.pre_top_k
    keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)

    # Named `variances` (not `vars`) to avoid shadowing the builtin.
    variances = torch.tensor([1, 1, 1, 1], dtype=torch.float32)
    output__ncnn = ncnn_detection_output_forward(
        batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_priors,
        score_threshold, iou_threshold, pre_top_k, keep_top_k,
        self.num_classes + 1,
        variances.cpu().detach().numpy())
    return output__ncnn
def test_rtmdet_head_predict_by_feat_ncnn():
    """Test predict_by_feat rewrite of RTMDetHead for the ncnn backend."""
    backend_type = Backend.NCNN
    check_backend(backend_type)
    rtmdet_head = get_rtmdet_head_model()
    rtmdet_head.cpu().eval()
    s = 320
    batch_img_metas = [{
        'scale_factor': np.ones(4),
        'pad_shape': (s, s, 3),
        'img_shape': (s, s, 3)
    }]

    output_names = ['detection_output']
    deploy_cfg = Config(
        dict(
            backend_config=dict(type=backend_type.value),
            onnx_config=dict(output_names=output_names, input_shape=None),
            codebase_config=dict(
                type='mmdet',
                model_type='ncnn_end2end',
                task='ObjectDetection',
                post_processing=dict(
                    score_threshold=0.05,
                    iou_threshold=0.45,
                    confidence_threshold=0.005,
                    max_output_boxes_per_class=200,
                    pre_top_k=-1,
                    keep_top_k=10,
                    background_label_id=-1,
                ))))

    seed_everything(1234)
    cls_scores = [
        torch.rand(1, 1, 40, 40),
        torch.rand(1, 1, 20, 20),
        torch.rand(1, 1, 10, 10)
    ]

    bbox_preds = [
        torch.rand(1, 4, 40, 40),
        torch.rand(1, 4, 20, 20),
        torch.rand(1, 4, 10, 10)
    ]

    # Get outputs of the rewritten model; the ncnn backend itself is not
    # executed in unit tests (run_with_backend=False).
    wrapped_model = WrapModel(
        rtmdet_head,
        'predict_by_feat',
        batch_img_metas=batch_img_metas,
        with_nms=True)
    rewrite_inputs = {'cls_scores': cls_scores, 'bbox_preds': bbox_preds}
    rewrite_outputs, is_backend_output = get_rewrite_outputs(
        wrapped_model=wrapped_model,
        model_inputs=rewrite_inputs,
        deploy_cfg=deploy_cfg,
        run_with_backend=False)
    # DetectionOutput yields [1, N, 6]: (label, score, x1, y1, x2, y2).
    if is_backend_output:
        assert rewrite_outputs[0].shape[-1] == 6
    else:
        assert rewrite_outputs.shape[-1] == 6
++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_codebase/test_mmdet/test_mmdet_models.py b/tests/test_codebase/test_mmdet/test_mmdet_models.py index f1b7cfa2c2..64fc623373 100644 --- a/tests/test_codebase/test_mmdet/test_mmdet_models.py +++ b/tests/test_codebase/test_mmdet/test_mmdet_models.py @@ -2198,7 +2198,8 @@ def test_rtmdet_head_predict_by_feat_ncnn(): rewrite_outputs, is_backend_output = get_rewrite_outputs( wrapped_model=wrapped_model, model_inputs=rewrite_inputs, - deploy_cfg=deploy_cfg) + deploy_cfg=deploy_cfg, + run_with_backend=False) # output should be of shape [1, N, 6] if is_backend_output: assert rewrite_outputs[0].shape[-1] == 6 From 5d946f1c242fa6f6a199ff91c8a5fabfd7e4f5ac Mon Sep 17 00:00:00 2001 From: hanrui1sensetime Date: Fri, 17 Mar 2023 16:00:37 +0800 Subject: [PATCH 18/18] fix lint --- mmdeploy/codebase/mmdet/models/dense_heads/rtmdet_head.py | 2 +- tests/test_codebase/test_mmdet/test_mmdet_models.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mmdeploy/codebase/mmdet/models/dense_heads/rtmdet_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/rtmdet_head.py index a58f9546c3..4113b944c9 100644 --- a/mmdeploy/codebase/mmdet/models/dense_heads/rtmdet_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/rtmdet_head.py @@ -8,8 +8,8 @@ from mmdeploy.codebase.mmdet import get_post_processing_params from mmdeploy.core import FUNCTION_REWRITER, mark -from mmdeploy.utils import Backend from mmdeploy.mmcv.ops import multiclass_nms +from mmdeploy.utils import Backend @FUNCTION_REWRITER.register_rewriter( diff --git a/tests/test_codebase/test_mmdet/test_mmdet_models.py b/tests/test_codebase/test_mmdet/test_mmdet_models.py index 64fc623373..376806e793 100644 --- a/tests/test_codebase/test_mmdet/test_mmdet_models.py +++ b/tests/test_codebase/test_mmdet/test_mmdet_models.py @@ -2126,7 +2126,8 @@ def test_solo_head_predict_by_feat(backend_type: Backend): def get_rtmdet_head_model(): from 
mmdet.models.dense_heads import RTMDetHead - from mmdet.models.task_modules.prior_generators.point_generator import MlvlPointGenerator + from mmdet.models.task_modules.prior_generators.point_generator import \ + MlvlPointGenerator test_cfg = Config( dict(