
Commit

fix lint
xiexinch committed Apr 25, 2024
1 parent 9aa8bf5 commit fb15f42
Showing 9 changed files with 73 additions and 70 deletions.
5 changes: 3 additions & 2 deletions projects/rtmpose3d/body3d_img2pose_demo.py
@@ -19,14 +19,15 @@
split_instances)
from mmpose.utils import adapt_mmdet_pipeline
from mmpose.visualization import Pose3dLocalVisualizer
from rtmpose3d import *

try:
from mmdet.apis import inference_detector, init_detector
has_mmdet = True
except (ImportError, ModuleNotFoundError):
has_mmdet = False

from rtmpose3d import * # noqa: F401, F403


def parse_args():
parser = ArgumentParser()
@@ -124,7 +125,7 @@ def parse_args():


def process_one_image(args, detector, frame: np.ndarray, frame_idx: int,
pose_estimator: TopdownPoseEstimator3D,
pose_estimator,
pose_est_results_last: List[PoseDataSample],
pose_est_results_list: List[List[PoseDataSample]],
next_id: int, visualize_frame: np.ndarray,
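
The star import in the demo script is there for its side effect: importing rtmpose3d runs the @MODELS.register_module() decorators in the package, so that config strings such as 'TopdownPoseEstimator3D' resolve to the project's custom classes. A minimal sketch of that registry pattern, assuming mmpose's MODELS registry and a hypothetical class name:

# Minimal sketch of registry-driven construction; MyPoseEstimator3D is a
# hypothetical class used only for illustration.
from mmpose.registry import MODELS


@MODELS.register_module()
class MyPoseEstimator3D:
    """Registered under its class name as soon as its module is imported."""

    def __init__(self, depth: int = 50):
        self.depth = depth


# Once the defining module has been imported (e.g. via
# from rtmpose3d import * in the demo), the registry can build the class
# from a plain config dict.
model = MODELS.build(dict(type='MyPoseEstimator3D', depth=101))
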
51 changes: 25 additions & 26 deletions projects/rtmpose3d/configs/rtmw3d-l_8xb64_cocktail14-384x288.py
@@ -53,6 +53,8 @@
use_dark=False,
root_index=(11, 12))

backbone_path = 'checkpoints/rtmpose-l_simcc-ucoco_dw-ucoco_270e-256x192-4d6dfc62_20230728.pth' # noqa

# model settings
model = dict(
type='TopdownPoseEstimator3D',
@@ -71,10 +73,7 @@
norm_cfg=dict(type='BN'),
act_cfg=dict(type='SiLU'),
init_cfg=dict(
type='Pretrained',
prefix='backbone.',
checkpoint='checkpoints/rtmpose-l_simcc-ucoco_dw-ucoco_270e-256x192-4d6dfc62_20230728.pth' # noqa
)),
type='Pretrained', prefix='backbone.', checkpoint=backbone_path)),
neck=dict(
type='CSPNeXtPAFPN',
in_channels=[256, 512, 1024],
@@ -112,15 +111,23 @@
label_softmax=True),
dict(
type='BoneLoss',
joint_parents=[0, 1, 2, 3, 4, 5, 6, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 50, 50, 51, 52, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 91, 92, 93, 94, 91, 96, 97, 98, 91, 100, 101, 102, 91, 104, 105, 106, 91, 108, 109, 110, 8, 112, 113, 114, 113, 112, 117, 118, 117, 112, 121, 122, 123, 112, 125, 126, 127, 112, 129, 130, 131],
joint_parents=[
0, 1, 2, 3, 4, 5, 6, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 2, 2, 2, 2, 2, 3, 3, 3, 3,
3, 50, 50, 51, 52, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 3,
3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 7, 91, 92, 93, 94, 91, 96, 97, 98, 91, 100,
101, 102, 91, 104, 105, 106, 91, 108, 109, 110, 8, 112,
113, 114, 113, 112, 117, 118, 117, 112, 121, 122, 123, 112,
125, 126, 127, 112, 129, 130, 131
],
use_target_weight=True,
loss_weight=2.0
)
loss_weight=2.0)
],
decoder=codec),
# test_cfg=dict(flip_test=False, mode='2d')
test_cfg=dict(flip_test=False)
)
test_cfg=dict(flip_test=False))

# base dataset settings
data_mode = 'topdown'
@@ -130,9 +137,10 @@
# pipelines
train_pipeline = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='RandomBackground',
bg_dir='/mnt/data/oss_beijing/mmseg/obj365v1_images',
bg_prob=0.5,
dict(
type='RandomBackground',
bg_dir='/mnt/data/oss_beijing/mmseg/obj365v1_images',
bg_prob=0.5,
),
dict(type='GetBBoxCenterScale'),
dict(type='RandomFlip', direction='horizontal'),
@@ -198,17 +206,14 @@
test_mode=False,
pipeline=[])


# dna rendering dataset
dna_rendering_dataset = dict(
type='DNARenderingDataset',
data_root='data/dna_rendering_part1',
data_mode='topdown',
ann_file='instances.npz',
subset_frac=0.1,
pipeline=[
dict(type='LoadMask', backend_args=backend_args)
],
pipeline=[dict(type='LoadMask', backend_args=backend_args)],
)

# mapping
@@ -482,10 +487,7 @@
dataset_wb = dict(
type='CombinedDataset',
metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'),
datasets=[
dataset_coco,
dataset_halpe
],
datasets=[dataset_coco, dataset_halpe],
pipeline=[],
test_mode=False,
)
@@ -581,11 +583,9 @@
test_mode=False,
)


# ubody dataset
scenes = [
'Magic_show',
'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow',
'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow',
'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing',
'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference'
]
@@ -604,7 +604,6 @@
pipeline=[])
ubody_datasets.append(ubody)


train_datasets = [
dataset_wb,
dataset_body,
@@ -615,7 +614,6 @@
# dna_rendering_dataset
]


# data loaders
train_dataloader = dict(
batch_size=64,
@@ -694,7 +692,8 @@
ann_file='annotations/coco_wholebody_val_v1.0.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
bbox_file='data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json',
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
pipeline=val_pipeline,
))
test_dataloader = val_dataloader
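
In the BoneLoss entry of this config, joint_parents lists, for every keypoint, the index of its parent joint, and the loss compares the lengths of the predicted parent-child bones with the ground-truth ones. A rough sketch of that idea, as an illustration rather than mmpose's exact BoneLoss implementation:

# Rough sketch of a bone-length loss driven by a joint_parents list; this
# illustrates the idea and is not mmpose's exact BoneLoss implementation.
import torch


def bone_length_loss(pred, gt, joint_parents, target_weight=None):
    """pred, gt: (B, K, 3) keypoints; joint_parents: length-K parent indices."""
    parents = torch.as_tensor(joint_parents, dtype=torch.long)
    # A bone is the segment from a joint's parent to the joint itself.
    pred_bones = torch.norm(pred - pred[:, parents], dim=-1)  # (B, K)
    gt_bones = torch.norm(gt - gt[:, parents], dim=-1)        # (B, K)
    diff = torch.abs(pred_bones - gt_bones)
    if target_weight is not None:  # per-keypoint weights, shape (B, K)
        diff = diff * target_weight
    return diff.mean()
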
48 changes: 24 additions & 24 deletions projects/rtmpose3d/configs/rtmw3d-x_8xb64_cocktail14-384x288.py
@@ -53,6 +53,8 @@
use_dark=False,
root_index=(11, 12))

backbone_path = 'checkpoints/rtmpose-x_simcc-ucoco_pt-aic-coco_270e-384x288-f5b50679_20230822.pth' # noqa

# model settings
model = dict(
type='TopdownPoseEstimator3D',
@@ -71,10 +73,7 @@
norm_cfg=dict(type='BN'),
act_cfg=dict(type='SiLU'),
init_cfg=dict(
type='Pretrained',
prefix='backbone.',
checkpoint='checkpoints/rtmpose-x_simcc-ucoco_pt-aic-coco_270e-384x288-f5b50679_20230822.pth' # noqa
)),
type='Pretrained', prefix='backbone.', checkpoint=backbone_path)),
neck=dict(
type='CSPNeXtPAFPN',
in_channels=[320, 640, 1280],
@@ -112,10 +111,19 @@
label_softmax=True),
dict(
type='BoneLoss',
joint_parents=[0, 1, 2, 3, 4, 5, 6, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 50, 50, 51, 52, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 91, 92, 93, 94, 91, 96, 97, 98, 91, 100, 101, 102, 91, 104, 105, 106, 91, 108, 109, 110, 8, 112, 113, 114, 113, 112, 117, 118, 117, 112, 121, 122, 123, 112, 125, 126, 127, 112, 129, 130, 131],
joint_parents=[
0, 1, 2, 3, 4, 5, 6, 5, 6, 7, 8, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 2, 2, 2, 2, 2, 3, 3, 3, 3,
3, 50, 50, 51, 52, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 3, 3,
3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 7, 91, 92, 93, 94, 91, 96, 97, 98, 91, 100,
101, 102, 91, 104, 105, 106, 91, 108, 109, 110, 8, 112,
113, 114, 113, 112, 117, 118, 117, 112, 121, 122, 123, 112,
125, 126, 127, 112, 129, 130, 131
],
use_target_weight=True,
loss_weight=2.0
)
loss_weight=2.0)
],
decoder=codec),
test_cfg=dict(flip_test=False, mode='2d')
@@ -130,9 +138,10 @@
# pipelines
train_pipeline = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='RandomBackground',
bg_dir='/mnt/data/oss_beijing/mmseg/obj365v1_images',
bg_prob=0.5,
dict(
type='RandomBackground',
bg_dir='/mnt/data/oss_beijing/mmseg/obj365v1_images',
bg_prob=0.5,
),
dict(type='GetBBoxCenterScale'),
dict(type='RandomFlip', direction='horizontal'),
@@ -198,17 +207,14 @@
test_mode=False,
pipeline=[])


# dna rendering dataset
dna_rendering_dataset = dict(
type='DNARenderingDataset',
data_root='data/dna_rendering_part1',
data_mode='topdown',
ann_file='instances.npz',
subset_frac=0.1,
pipeline=[
dict(type='LoadMask', backend_args=backend_args)
],
pipeline=[dict(type='LoadMask', backend_args=backend_args)],
)

# mapping
@@ -482,10 +488,7 @@
dataset_wb = dict(
type='CombinedDataset',
metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'),
datasets=[
dataset_coco,
dataset_halpe
],
datasets=[dataset_coco, dataset_halpe],
pipeline=[],
test_mode=False,
)
@@ -581,11 +584,9 @@
test_mode=False,
)


# ubody dataset
scenes = [
'Magic_show',
'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow',
'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow',
'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing',
'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference'
]
@@ -604,7 +605,6 @@
pipeline=[])
ubody_datasets.append(ubody)


train_datasets = [
dataset_wb,
dataset_body,
@@ -615,7 +615,6 @@
# dna_rendering_dataset
]


# data loaders
train_dataloader = dict(
batch_size=32,
@@ -694,7 +693,8 @@
ann_file='annotations/coco_wholebody_val_v1.0.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
bbox_file='data/coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json',
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
pipeline=val_pipeline,
))
test_dataloader = val_dataloader
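
Both configs keep the pretrained 2D RTMPose checkpoint path in a backbone_path variable and pass it to init_cfg with prefix='backbone.', so only the backbone weights are loaded into the 3D model. A sketch of what that prefix filtering amounts to with a plain PyTorch state dict; mmengine's Pretrained initializer does the equivalent with extra bookkeeping such as download caching and missing-key logging:

# Sketch of prefix-filtered weight loading from a plain PyTorch checkpoint.
import torch


def load_backbone_weights(backbone, checkpoint_path, prefix='backbone.'):
    ckpt = torch.load(checkpoint_path, map_location='cpu')
    state_dict = ckpt.get('state_dict', ckpt)
    # Keep only the keys under the prefix and strip it, e.g.
    # 'backbone.stem.0.conv.weight' -> 'stem.0.conv.weight'.
    backbone_state = {
        k[len(prefix):]: v
        for k, v in state_dict.items() if k.startswith(prefix)
    }
    backbone.load_state_dict(backbone_state, strict=False)
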
6 changes: 4 additions & 2 deletions projects/rtmpose3d/rtmpose3d/__init__.py
@@ -1,6 +1,8 @@
from .loss import KLDiscretLoss2
from .pose_estimator import TopdownPoseEstimator3D
from .rtmw3d_head import RTMW3DHead
from .simcc_3d_label import SimCC3DLabel
from .loss import KLDiscretLoss2

__all__ = ['TopdownPoseEstimator3D', 'RTMW3DHead', 'SimCC3DLabel', 'KLDiscretLoss2']
__all__ = [
'TopdownPoseEstimator3D', 'RTMW3DHead', 'SimCC3DLabel', 'KLDiscretLoss2'
]
5 changes: 3 additions & 2 deletions projects/rtmpose3d/rtmpose3d/loss.py
@@ -1,5 +1,6 @@
from mmpose.registry import MODELS
from mmpose.models.losses import KLDiscretLoss
from mmpose.registry import MODELS


@MODELS.register_module()
class KLDiscretLoss2(KLDiscretLoss):
@@ -34,4 +35,4 @@ def loss_name(self):
Returns:
str: The name of this loss item.
"""
return self._loss_name
return self._loss_name
5 changes: 3 additions & 2 deletions projects/rtmpose3d/rtmpose3d/pose_estimator.py
@@ -3,9 +3,10 @@

import numpy as np

from mmpose.utils.typing import InstanceList, PixelDataList, SampleList
from mmpose.registry import MODELS
from mmpose.models.pose_estimators import TopdownPoseEstimator
from mmpose.registry import MODELS
from mmpose.utils.typing import InstanceList, PixelDataList, SampleList


@MODELS.register_module()
class TopdownPoseEstimator3D(TopdownPoseEstimator):
11 changes: 5 additions & 6 deletions projects/rtmpose3d/rtmpose3d/rtmw3d_head.py
@@ -9,12 +9,12 @@

from mmpose.codecs.utils import get_simcc_maximum as get_2d_simcc_maximum
from mmpose.evaluation.functional import keypoint_mpjpe
from mmpose.models.heads import BaseHead
from mmpose.models.utils.rtmcc_block import RTMCCBlock, ScaleNorm
from mmpose.registry import KEYPOINT_CODECS, MODELS
from mmpose.utils.tensor_utils import to_numpy
from mmpose.utils.typing import (ConfigType, InstanceList, OptConfigType,
OptSampleList)
from mmpose.models.heads import BaseHead
from .utils import get_simcc_maximum

OptIntSeq = Optional[Sequence[int]]
@@ -244,10 +244,9 @@ def _pack_and_call(args, func):
batch_scores.append(scores)

preds = []
for keypoints_2d, keypoints, keypoints_simcc, scores in zip(batch_keypoints2d,
batch_keypoints,
batch_keypoints_simcc,
batch_scores):
for keypoints_2d, keypoints, keypoints_simcc, scores in zip(
batch_keypoints2d, batch_keypoints, batch_keypoints_simcc,
batch_scores):
pred = InstanceData(
keypoints_2d=keypoints_2d,
keypoints=keypoints,
@@ -347,7 +346,7 @@ def loss(
# calculate losses
losses = dict()
for i, loss_ in enumerate(self.loss_module):
if loss_.loss_name == 'loss_bone' or loss_.loss_name == 'loss_mpjpe':
if loss_.loss_name in ['loss_bone', 'loss_mpjpe']:
pred_coords = get_3d_coord(pred_x, pred_y, pred_z,
with_z_labels)
gt_coords = get_3d_coord(gt_x, gt_y, gt_z, with_z_labels)
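
The simplified membership test in loss() routes decoded 3D coordinates to the losses that need them (loss_bone, loss_mpjpe) and the raw SimCC vectors to the rest. A condensed sketch of that dispatch, with decode_coords and the argument layout as placeholders rather than the head's real API:

# Condensed sketch of the loss dispatch in the head's loss() method;
# decode_coords and the argument layout are placeholders.
def accumulate_losses(loss_modules, pred_simcc, gt_simcc, weights,
                      decode_coords):
    losses = {}
    for loss_fn in loss_modules:
        if loss_fn.loss_name in ('loss_bone', 'loss_mpjpe'):
            # Coordinate-space losses operate on decoded (x, y, z) keypoints.
            pred, gt = decode_coords(pred_simcc), decode_coords(gt_simcc)
        else:
            # SimCC-space losses compare the 1-D classification vectors.
            pred, gt = pred_simcc, gt_simcc
        losses[loss_fn.loss_name] = loss_fn(pred, gt, weights)
    return losses
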
8 changes: 4 additions & 4 deletions projects/rtmpose3d/rtmpose3d/simcc_3d_label.py
@@ -5,9 +5,8 @@
import numpy as np
from numpy import ndarray

from mmpose.registry import KEYPOINT_CODECS
from mmpose.codecs.base import BaseKeypointCodec

from mmpose.registry import KEYPOINT_CODECS
from .utils import get_simcc_maximum


@@ -155,8 +154,9 @@ def encode(self,
with_z_label = True
else:
if keypoints.shape != np.zeros([]).shape:
keypoints_z = np.ones((keypoints.shape[0],
keypoints.shape[1], 1), dtype=np.float32)
keypoints_z = np.ones(
(keypoints.shape[0], keypoints.shape[1], 1),
dtype=np.float32)
keypoints = np.concatenate([keypoints, keypoints_z], axis=-1)
x, y, z, keypoint_weights = self._generate_gaussian(
keypoints, keypoints_visible)
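
The reformatted call in encode() pads 2D keypoints with a dummy z column of ones before generating the Gaussian labels; the shape bookkeeping is simply the following (K=133 whole-body keypoints is used only as an example):

# Shape check for the z padding: (N, K, 2) keypoints gain a dummy z column
# of ones and become (N, K, 3).
import numpy as np

keypoints = np.random.rand(1, 133, 2).astype(np.float32)   # (N, K, 2)
keypoints_z = np.ones((keypoints.shape[0], keypoints.shape[1], 1),
                      dtype=np.float32)
keypoints = np.concatenate([keypoints, keypoints_z], axis=-1)
assert keypoints.shape == (1, 133, 3)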