# pbev_seabird_kitti360_val.ini
# Validation-stage configuration for PBEV / SeaBird on the KITTI-360 dataset.
[general]
# Number of epochs between validations
val_interval = 20
save_interval = 1
# Number of steps before outputting a log entry
log_interval = 50
# Panoptic evaluation parameters
score_threshold = 0.5
overlap_threshold = 0.5
min_stuff_area = 0
cudnn_benchmark = no
[base]
# Architecture for the body
base = efficientdet-d3
# Path to pre-trained weights
weights =
# Normalization mode:
# -- bn: in-place batch norm everywhere
# -- syncbn: synchronized in-place batch norm everywhere
# -- syncbn+bn: synchronized in-place batch norm in the static part of the network, in-place batch norm everywhere else
# -- gn: group norm everywhere
# -- syncbn+gn: synchronized in-place batch norm in the static part of the network, group norm everywhere else
# -- off: do not normalize activations (scale and bias are kept)
normalization_mode = syncbn
# Activation: 'leaky_relu' or 'elu'
activation = leaky_relu
activation_slope = 0.01
# Group norm parameters
gn_groups = 0
# Additional parameters for the body
base_params = {}
# Number of frozen modules: in [1, 5]
num_frozen = 0
# Whether to freeze BN modules
bn_frozen = no
[fpn]
fpn_channels = 256
extra_scales = 0
# Input Settings
inputs = ["mod2", "mod3", "mod4", "mod5"]
# Meta-info
out_strides = (4, 8, 16, 32)
interpolation = nearest
[transformer]
tfm_scales = (4, 8, 16, 32)
in_channels = 160
tfm_channels = 128
bev_ms_channels = 256
use_init_theta = yes
# Train segmentor on buildings (2) and cars (9)
front_vertical_classes = (2, 9)
front_flat_classes = (0, 1, 5, 7, 3, 4, 6, 8, 10, 11)
bev_vertical_classes = (2, 9)
bev_flat_classes = (0, 1, 5, 6, 3, 4, 7, 8, 10)
[rpn]
hidden_channels = 256
stride = 1
# Anchor settings
anchor_ratios = (1., 0.5, 2.)
anchor_scale = (4, 8, 16)
# Proposal settings
nms_threshold = 0.7
num_pre_nms_train = 12000
num_post_nms_train = 2000
num_pre_nms_val = 6000
num_post_nms_val = 1000
min_size = 0
# Anchor matcher settings
num_samples = 256
pos_ratio = .5
pos_threshold = .7
neg_threshold = .3
void_threshold = 0.7
# FPN-specific settings
fpn_min_level = 0
fpn_levels = 4
# Loss settings
sigma = 3.
[roi]
roi_size = (14, 14)
# Matcher settings
num_samples = 512
pos_ratio = .25
pos_threshold = .5
neg_threshold_hi = .5
neg_threshold_lo = 0.
void_threshold = 0.7
void_is_background = no
# Prediction generator settings
nms_threshold = 0.3
score_threshold = 0.1
max_predictions = 100
# FPN-specific settings
fpn_min_level = 0
fpn_levels = 4
fpn_canonical_scale = 224
fpn_canonical_level = 2
# Loss settings
sigma = 1.
bbx_reg_weights = (10., 10., 5., 5.)
[sem]
fpn_min_level = 0
fpn_levels = 4
pooling_size = (64, 64)
# Loss settings
ohem = .25
classes = 2
use_dice_loss = 1
[det]
init_segmentor = "output/pbev_seabird_kitti360_val_stage1/saved_models/model_19.pth"
num_classes = 2
det_classes = ['Building','Car']
use_uncertainty = false
use_per_class_uncertainty = false
pred_y_pix = false
norm_y_pix = false
app_feat = "bev"
K = 50
conf_thresh = 0.1
det_down_ratio = 4
[optimizer]
base_lr = 0.0025
weight_decay = 0.0001
weight_decay_norm = no
momentum = 0.9
nesterov = yes
clip_gradient = true
loss_weights = {"sem_loss": 35, "vf_loss": 1, "v_region_loss": 10, "f_region_loss": 10, "det_hm_loss": 7, "det_off_loss": 7, "det_wh_loss": 7, "det_h3d_loss": 7, "det_y3d_loss": 7, "det_cls_theta_loss": 7}
[scheduler]
epochs = 20
# Scheduler type: 'linear', 'step', 'poly' or 'multistep'
type = multistep_multigamma
# When to update the learning rate: 'batch', 'epoch'
update_mode = epoch
# Additional parameters for the scheduler
# -- linear
# from: initial lr multiplier
# to: final lr multiplier
# -- step
# step_size: number of steps between lr decreases
# gamma: multiplicative factor
# -- poly
# gamma: exponent of the polynomial
# -- multistep
# milestones: step indices where the lr decreases will be triggered
# -- multistep_multigamma
# gamma: list containing the factors wrt base_lr by which the LR decreases
# lr[i] = base_lr * gamma[i]
params = {"milestones": [10, 15], "gamma": [0.5, 0.2]}
burn_in_steps = 0
burn_in_start = 0.00333
[cameras]
intrinsics = {"fx": 552.554261, "fy": 552.554261, "px": 682.049453, "py": 238.769549}
extrinsics = {"translation": (0.8, 0.3, 1.55), "rotation": (-85, 0, 180)}
bev_params = {"f": 336, "cam_z": 25}
[dataloader]
# Image size parameters
shortest_size = 376
longest_max_size = 1408
# Batch size
train_batch_size = 2
val_batch_size = 2
# Augmentation parameters
rgb_mean = (0.485, 0.456, 0.406)
rgb_std = (0.229, 0.224, 0.225)
random_flip = yes
scale = 1
bev_crop = (768, 704)
front_resize = (384, 1408)
random_brightness = (0.8, 1.2)
random_contrast = (0.8, 1.2)
random_saturation = (1, 1)
random_hue = (0, 0)
# Number of worker threads
train_workers = 4
val_workers = 4
# Subsets
train_set = train_det
val_set = val_det_samp