-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
200 lines (171 loc) · 10.4 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
import argparse
import logging
import logging.config
import sys
import time
from argparse import Namespace
import torch
import torch.backends.cudnn as cudnn
from datetime import datetime
import baselines
import config
from init import init_net, init_settings, set_paths
from utils.results_manager import ResultsManager
from utils.utils import timedelta_to_str
from dilam import dilam
def main(args):
    """Run DILAM adaptation and/or the requested baselines.

    Performs ``args.num_runs`` independent runs; in each run the model is
    re-loaded from ``args.ckpt_path`` and evaluated across every weather
    severity index, with per-run results saved/printed via ResultsManager.

    Args:
        args: Parsed command-line namespace (see the argparse setup in the
            ``__main__`` block). ``args.num_severities`` is presumably filled
            in by ``init_settings`` — it is not a parser option.
    """
    # Utility mode: only convert KITTI labels to YOLO format, then quit.
    if args.kitti_to_yolo_labels:
        from utils.utils import kitti_labels_to_yolo
        kitti_labels_to_yolo(args.kitti_to_yolo_labels)
        # sys.exit instead of the interactive-only exit() helper.
        sys.exit(0)

    cudnn.benchmark = True
    start_time = datetime.now()

    log.info('------------------------------------ NEW RUN ------------------------------------')
    log.info(f'Running: {" ".join(sys.argv)}')
    log.info('Full args list:')
    for arg in vars(args):
        log.info(f'{arg}: {getattr(args, arg)}')
    log.info('---------------------------------------------------------------------------------')

    results = ResultsManager('mAP@50')
    init_settings(args)
    if args.usr:
        set_paths(args)
    net = init_net(args, cls=True)

    for run in range(args.num_runs):
        # Restore the pristine checkpoint so runs are independent.
        net.load_state_dict(torch.load(args.ckpt_path))
        for args.severity_idx in range(args.num_severities):
            if not args.no_dilam:
                dilam(net, args, verbose=args.verbose)
            if 'disc' in args.baselines:
                baselines.disc(args, net)
            if 'source_only' in args.baselines:
                baselines.source_only(net, args)
            # Scenario-dependent baselines run once per requested scenario
            # (online and/or offline).
            for scenario in args.scenario:
                if 'disjoint' in args.baselines:
                    baselines.disjoint(net, args, scenario)
                if 'freezing' in args.baselines:
                    baselines.freezing(net, args, scenario)
                if 'fine_tuning' in args.baselines:
                    baselines.fine_tuning(net, args, scenario)
                if 'joint_training' in args.baselines:
                    baselines.joint_training(net, args, scenario)
                if 'joint_training_affine' in args.baselines:
                    baselines.joint_training_bn_affine(net, args, scenario)

        # Persist and summarize this run's results (if any were recorded).
        if results.has_results():
            timestamp_str = time.strftime('%b-%d-%Y_%H%M', time.localtime())
            results.save_to_file(file_name=f'{timestamp_str}_raw_results.pkl')
            results.print_summary_latex()
            results.plot_summary(f'{timestamp_str}_summary_plot.png')
        if args.num_runs > 1:
            # Clear per-run results; aggregates are printed after all runs.
            results.reset_results()
        log.info(f'{">" * 30} FINISHED RUN #{run} {"<" * 30}')
        runtime = datetime.now() - start_time
        log.info(f'Runtime so far: {timedelta_to_str(runtime)}')
        torch.cuda.empty_cache()

    if args.num_runs > 1:
        results.print_multiple_runs_results()
        results.print_multiple_runs_results_map50to95()
    runtime = datetime.now() - start_time
    log.info(f'Execution finished in {timedelta_to_str(runtime)}')
# Log uncaught exceptions that aren't keyboard interrupts.
def handle_exception(exception_type, value, traceback):
    """``sys.excepthook`` replacement that records uncaught exceptions in the log.

    KeyboardInterrupt is delegated to the default hook so Ctrl-C still
    produces the normal short traceback instead of an error log entry.
    """
    if issubclass(exception_type, KeyboardInterrupt):
        sys.__excepthook__(exception_type, value, traceback)
        return
    # Fixed typo in the log message: 'occured' -> 'occurred'.
    log.exception('Exception occurred:', exc_info=(exception_type, value, traceback))


sys.excepthook = handle_exception
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # General settings
    parser.add_argument('--usr', default=None, type=str)
    parser.add_argument('--dataroot', default='path/to/dataroot')
    parser.add_argument('--ckpt_path', default='path/to/checkpoint.pt')
    parser.add_argument('--dataset', default='kitti')
    parser.add_argument('--model', default=None, type=str)
    parser.add_argument('--logfile', default='log.txt', type=str)
    parser.add_argument('--tasks', default=[], type=str, nargs='*',
                        help='List of tasks to run (in given order), empty means defaults from config.KITTI_TASKS')
    all_baselines = ['disc', 'source_only', 'disjoint', 'freezing', 'fine_tuning', 'joint_training', 'joint_training_affine']
    parser.add_argument('--baselines', default=[], type=str, nargs='*', choices=all_baselines,
                        help='List of baselines to run')
    parser.add_argument('--scenario', default=['online'], type=str, nargs='*',
                        help='Scenarios to run (online and/or offline)')
    parser.add_argument('--fog_severities', default=['fog_30'], type=str, nargs='*')
    parser.add_argument('--rain_severities', default=['200mm'], type=str, nargs='*')
    parser.add_argument('--snow_severities', default=['5'], type=str, nargs='*')
    parser.add_argument('--checkpoints_path', default='checkpoints', help='path where model checkpoints will be saved')
    parser.add_argument('--num_runs', default=1, type=int)

    # DILAM
    parser.add_argument('--no_dilam_adapt', action='store_true', help='do not run DILAM adapt')
    parser.add_argument('--dilam_adapt_all', action='store_true',
                        help='DILAM adapt for all BN layers. Without this option '
                             'the first 2 BN layers are not adapted.')
    parser.add_argument('--knn', action='store_true', help='Use KNN classifier. Implies dilam_adapt_all.')
    parser.add_argument('--cls_ckpt_path', default='checkpoints/kitti_cls_head_single_layer.pt',
                        help='path to linear classification head checkpoint')
    parser.add_argument('--no_dilam', action='store_true', help='do not run DILAM')
    parser.add_argument('--no_augment_dilam', action='store_true', help='do not use augmented inference in DILAM')
    parser.add_argument('--dilam_adapt_batch_size', default=30, type=int)

    # DUA/DISC adaption
    parser.add_argument('--num_samples', default=50, type=int)
    parser.add_argument('--decay_factor', default=0.94, type=float)
    parser.add_argument('--min_mom', default=0.005, type=float)
    parser.add_argument('--no_disc_adaption', action='store_true',
                        help='skip DISC adaption phase (assumes existing BN running estimates checkpoint)')

    # Learning & Loading
    parser.add_argument('--lr', default=0.01, type=float)
    parser.add_argument('--initial_task_lr', default=0.01, type=float)
    parser.add_argument('--epochs', default=150, type=int)
    parser.add_argument('--batch_size', default=8, type=int)
    parser.add_argument('--workers', type=int, default=1, help='maximum number of dataloader workers')
    parser.add_argument('--yolo_lr_adjustment', type=str, default='thirds',
                        choices=['thirds', 'linear_lr', 'cosine'],
                        help='how yolov3 training reduces learning rate')

    # LR scheduler and early stopping
    # for yolov3 these setting only apply with yolo_lr_adjustment set to 'thirds',
    # in which case the reduction by a factor of 3 can also be changed by setting
    # lr_factor to a different value
    parser.add_argument('--patience', default=4, type=int)
    parser.add_argument('--lr_factor', default=1/3, type=float)
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--max_unsuccessful_reductions', default=3, type=int)

    # yolov3
    parser.add_argument('--weights', type=str, default='yolov3.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='models/yolov3.yaml', help='model.yaml path')
    parser.add_argument('--img_size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
    parser.add_argument('--rect', action='store_true', help='rectangular training')
    parser.add_argument('--device', default='3', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
    parser.add_argument('--start_disjoint_offline_from_initial', action='store_true',
                        help='start offline disjoint training from checkpoint trained on initial task')
    parser.add_argument('--use_freezing_heads_ckpts', action='store_true',
                        help='Use freezing baseline heads from a previous run. '
                             'Without this option previously saved heads are moved.')
    parser.add_argument('--conf_thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou_thres', type=float, default=0.6, help='IOU threshold for NMS')

    # yolov3 untested
    # NOTE: store_true already defaults to False; the redundant
    # 'default = False' was removed (behavior unchanged).
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
    parser.add_argument('--notest', action='store_true', help='only test final epoch')
    parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache_images', action='store_true', help='cache images for faster training')
    parser.add_argument('--image_weights', action='store_true', help='use weighted image selection for training')
    parser.add_argument('--multi_scale', action='store_true', help='vary img-size +/- 50%%')
    parser.add_argument('--single_cls', action='store_true', help='train multi-class data as single-class')
    parser.add_argument('--sync_bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
    parser.add_argument('--log_imgs', type=int, default=16, help='number of images for W&B logging, max 100')
    parser.add_argument('--log_artifacts', action='store_true', help='log artifacts, i.e. final trained model')
    parser.add_argument('--project', default='runs/train', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist_ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--quad', action='store_true', help='quad dataloader')

    # other
    parser.add_argument('--kitti_to_yolo_labels', default=None, type=str,
                        help='Generate YOLO style labels from KITTI labels, given original KITTI root dir')

    args: Namespace = parser.parse_args()

    # Point the file handler at the user-chosen logfile before configuring
    # logging, so everything (including main's banner) lands in the right file.
    config.LOGGER_CFG['handlers']['file_handler']['filename'] = args.logfile
    logging.config.dictConfig(config.LOGGER_CFG)
    log = logging.getLogger('MAIN')

    main(args)