# qdtrack-frcnn_r50_fpn_12e_bdd100k.py
# model settings
_base_ = '../_base_/qdtrack_faster_rcnn_r50_fpn.py'
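# Overrides on top of the base QDTrack Faster R-CNN config: the detector's
# bbox head is set to the 8 BDD100K box-tracking classes, and the track head
# trains its embeddings with a combined instance-balanced positive /
# IoU-balanced negative sampler.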
model = dict(
    detector=dict(roi_head=dict(bbox_head=dict(num_classes=8))),
    track_head=dict(
        track_train_cfg=dict(
            sampler=dict(
                type='CombinedSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=3,
                add_gt_as_proposals=True,
                pos_sampler=dict(type='InstanceBalancedPosSampler'),
                neg_sampler=dict(
                    type='IoUBalancedNegSampler',
                    floor_thr=-1,
                    floor_fraction=0,
                    num_bins=3)))),
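    # Inference-time tracker: associates detections with existing tracklets
    # via a bi-directional softmax ('bisoftmax') over quasi-dense embeddings.
    # init_score_thr gates new track creation, match_score_thr gates
    # association, and memo_tracklet_frames controls how long unmatched
    # tracklets stay in memory.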
    tracker=dict(
        type='QuasiDenseEmbedTracker',
        init_score_thr=0.7,
        obj_score_thr=0.3,
        match_score_thr=0.5,
        memo_tracklet_frames=10,
        memo_backdrop_frames=1,
        memo_momentum=0.8,
        nms_conf_thr=0.5,
        nms_backdrop_iou_thr=0.3,
        nms_class_iou_thr=0.7,
        with_cats=True,
        match_metric='bisoftmax'),
    # model training and testing settings (train_cfg/test_cfg) are inherited
    # from the base config
)
# dataset settings
dataset_type = 'BDDVideoDataset'
data_root = 'data/bdd/'
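# Standard ImageNet mean/std in RGB order.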
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
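# The training pipeline operates on a key frame plus a reference frame; the
# Seq* transforms apply the same augmentation parameters to both images
# (share_params=True) so that gt_match_indices remain valid across the pair,
# and the reference frame's keys are collected under the 'ref' prefix.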
train_pipeline = [
    dict(type='LoadMultiImagesFromFile'),
    dict(type='SeqLoadAnnotations', with_bbox=True, with_ins_id=True),
    dict(type='SeqResize', img_scale=(1296, 720), keep_ratio=True),
    dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5),
    dict(type='SeqNormalize', **img_norm_cfg),
    dict(type='SeqPad', size_divisor=32),
    dict(type='SeqDefaultFormatBundle'),
    dict(
        type='SeqCollect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_match_indices'],
        ref_prefix='ref'),
]
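# Testing runs on single frames at a fixed (1296, 720) scale; MultiScaleFlipAug
# is configured with flip=False, so no test-time augmentation is applied.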
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1296, 720),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='VideoCollect', keys=['img'])
        ])
]
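# Training concatenates two sources: the BDD100K tracking videos, where each
# key frame is paired with one reference frame sampled uniformly from nearby
# frames (scope=3), and the BDD100K 100k detection split loaded as static
# images (load_as_video=False). Validation and testing use the tracking val
# split.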
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=data_root +
            'labels/box_track_20/box_track_train_cocofmt.json',
            img_prefix=data_root + 'images/track/train/',
            key_img_sampler=dict(interval=1),
            ref_img_sampler=dict(num_ref_imgs=1, scope=3, method='uniform'),
            pipeline=train_pipeline),
        dict(
            type=dataset_type,
            load_as_video=False,
            ann_file=data_root + 'labels/det_20/det_train_cocofmt.json',
            img_prefix=data_root + 'images/100k/train/',
            pipeline=train_pipeline)
    ],
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'labels/box_track_20/box_track_val_cocofmt.json',
        img_prefix=data_root + 'images/track/val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'labels/box_track_20/box_track_val_cocofmt.json',
        img_prefix=data_root + 'images/track/val/',
        pipeline=test_pipeline))
# optimizer
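# lr=0.02 assumes the usual 8 GPUs x 2 samples_per_gpu setup (total batch
# size 16); with a different total batch size, scale the lr linearly.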
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
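# Linear warmup over the first 1000 iterations, then the lr drops by the
# default factor of 10 at epochs 8 and 11 (the "12e" schedule in the file
# name).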
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])
# checkpoint saving
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
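# Evaluate both detection ('bbox') and tracking ('track') metrics every 2
# epochs.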
evaluation = dict(metric=['bbox', 'track'], interval=2)