-
Notifications
You must be signed in to change notification settings - Fork 12
/
petr_r50_16x2_100e_coco.py
137 lines (137 loc) · 5.36 KB
/
petr_r50_16x2_100e_coco.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
# Inherit shared settings from the base configs: the COCO keypoint dataset
# pipeline and the project's default runtime (hooks, logging, etc.).
_base_ = [
    '../_base_/datasets/coco_keypoint.py', '../_base_/default_runtime.py'
]
model = dict(
type='opera.PETR',
backbone=dict(
type='mmdet.ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='mmdet.ChannelMapper',
in_channels=[512, 1024, 2048],
kernel_size=1,
out_channels=256,
act_cfg=None,
norm_cfg=dict(type='GN', num_groups=32),
num_outs=4),
bbox_head=dict(
type='opera.PETRHead',
num_query=300,
num_classes=1, # only person
in_channels=2048,
sync_cls_avg_factor=True,
with_kpt_refine=True,
as_two_stage=True,
transformer=dict(
type='opera.PETRTransformer',
encoder=dict(
type='mmcv.DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='mmcv.BaseTransformerLayer',
attn_cfgs=dict(
type='mmcv.MultiScaleDeformableAttention',
embed_dims=256),
feedforward_channels=1024,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='opera.PetrTransformerDecoder',
num_layers=3,
return_intermediate=True,
transformerlayers=dict(
type='mmcv.DetrTransformerDecoderLayer',
attn_cfgs=[
dict(
type='mmcv.MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
dict(
type='opera.MultiScaleDeformablePoseAttention',
embed_dims=256)
],
feedforward_channels=1024,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm'))),
hm_encoder=dict(
type='mmcv.DetrTransformerEncoder',
num_layers=1,
transformerlayers=dict(
type='mmcv.BaseTransformerLayer',
attn_cfgs=dict(
type='mmcv.MultiScaleDeformableAttention',
embed_dims=256,
num_levels=1),
feedforward_channels=1024,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
refine_decoder=dict(
type='mmcv.DeformableDetrTransformerDecoder',
num_layers=2,
return_intermediate=True,
transformerlayers=dict(
type='mmcv.DetrTransformerDecoderLayer',
attn_cfgs=[
dict(
type='mmcv.MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
dict(
type='mmcv.MultiScaleDeformableAttention',
embed_dims=256,
im2col_step=128)
],
feedforward_channels=1024,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')))),
positional_encoding=dict(
type='mmcv.SinePositionalEncoding',
num_feats=128,
normalize=True,
offset=-0.5),
loss_cls=dict(
type='mmdet.FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=2.0),
loss_kpt=dict(type='mmdet.L1Loss', loss_weight=70.0),
loss_kpt_rpn=dict(type='mmdet.L1Loss', loss_weight=70.0),
loss_oks=dict(type='opera.OKSLoss', loss_weight=2.0),
loss_hm=dict(type='opera.CenterFocalLoss', loss_weight=4.0),
loss_kpt_refine=dict(type='mmdet.L1Loss', loss_weight=80.0),
loss_oks_refine=dict(type='opera.OKSLoss', loss_weight=3.0)),
train_cfg=dict(
assigner=dict(
type='opera.PoseHungarianAssigner',
cls_cost=dict(type='mmdet.FocalLossCost', weight=2.0),
kpt_cost=dict(type='opera.KptL1Cost', weight=70.0),
oks_cost=dict(type='opera.OksCost', weight=7.0))),
test_cfg=dict(max_per_img=100)) # set 'max_per_img=20' for time counting
# Optimizer: AdamW at 2e-4 base LR. Parameter groups whose names contain the
# custom keys below (the backbone, and the deformable-attention sampling
# offsets / reference points) train at a 10x reduced learning rate.
optimizer = dict(
    type='AdamW',
    lr=0.0002,
    weight_decay=1e-4,
    paramwise_cfg=dict(
        custom_keys={
            'backbone': {'lr_mult': 0.1},
            'sampling_offsets': {'lr_mult': 0.1},
            'reference_points': {'lr_mult': 0.1},
        }))
# Clip gradients by global L2 norm at 0.1.
optimizer_config = {'grad_clip': {'max_norm': 0.1, 'norm_type': 2}}
# Learning policy: step decay at epoch 80 within a 100-epoch schedule.
lr_config = {'policy': 'step', 'step': [80]}
runner = {'type': 'EpochBasedRunner', 'max_epochs': 100}
# Checkpoint every epoch, retaining at most the 20 most recent.
checkpoint_config = {'interval': 1, 'max_keep_ckpts': 20}