-
Notifications
You must be signed in to change notification settings - Fork 13
/
config.py
executable file
·138 lines (97 loc) · 4.16 KB
/
config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
import os
from easydict import EasyDict as edict
import torch
import numpy as np
from utils.transforms import *
# ---------------------------------------------------------------------------
# Dataset paths
# ---------------------------------------------------------------------------
PATH = edict()
PATH.DB_ROOT = '../data/kaist-rgbt/'
# NOTE(review): single-argument os.path.join is a no-op — this was presumably
# meant to be joined with a root directory (e.g. PATH.DB_ROOT); confirm
# against whichever loader consumes JSON_GT_FILE before changing it.
PATH.JSON_GT_FILE = os.path.join('kaist_annotations_test20.json' )

# ---------------------------------------------------------------------------
# Training configuration
# ---------------------------------------------------------------------------
train = edict()
train.day = "all"                            # time-of-day split: "all", "day", "night"
train.img_set = f"train-{train.day}-02.txt"  # image-set list file (same pattern as test.img_set)
train.checkpoint = None                      # checkpoint to load/resume from (None = from scratch)
train.batch_size = 12                        # batch size
train.start_epoch = 0                        # start at this epoch
train.epochs = 40                            # number of epochs to run without early-stopping
train.epochs_since_improvement = 3           # number of epochs since there was an improvement in the validation metric
train.best_loss = 100.                       # assume a high loss at first
train.lr = 1e-4                              # learning rate
train.momentum = 0.9                         # momentum
train.weight_decay = 5e-4                    # weight decay
# Clip if gradients are exploding, which may happen at larger batch sizes
# (sometimes at 32) - you will recognize it by a sorting error in the
# MultiBox loss calculation.
train.grad_clip = None
train.print_freq = 10                        # log every N iterations
train.annotation = "AR-CNN"                  # annotation set: "AR-CNN", "Sanitize", "Original"
# ---------------------------------------------------------------------------
# Test / evaluation configuration
# ---------------------------------------------------------------------------
_test_day = "all"  # time-of-day split: "all", "day", "night"
test = edict({
    'result_path': './result',                        # COCO-tool output (jpg & json) directory
    'day': _test_day,
    'img_set': f"test-{_test_day}-20.txt",            # image-set list file
    'annotation': "AR-CNN",
    'input_size': [512., 640.],
    # Model evaluated by eval.py
    'checkpoint': "./jobs/best_checkpoint.pth.tar",
    'batch_size': 4,
    # Batch size used by train_eval.py
    'eval_batch_size': 1,
})

# KAIST per-channel image statistics (mean / std)
## RGB
IMAGE_MEAN = [0.3465, 0.3219, 0.2842]
IMAGE_STD = [0.2358, 0.2265, 0.2274]
## LWIR (thermal)
LWIR_MEAN = [0.1598]
LWIR_STD = [0.0813]

# ---------------------------------------------------------------------------
# Dataset loading
# ---------------------------------------------------------------------------
dataset = edict()
dataset.workers = 8
# Box filtering ranges: height, x, y, width (pixels) an object must fall in
# to be loaded for each split.
dataset.OBJ_LOAD_CONDITIONS = {
    'train': {'hRng': (12, np.inf), 'xRng': (5, 635), 'yRng': (5, 507), 'wRng': (-np.inf, np.inf)},
    'test': {'hRng': (-np.inf, np.inf), 'xRng': (5, 635), 'yRng': (5, 507), 'wRng': (-np.inf, np.inf)},
}
# ---------------------------------------------------------------------------
# Fusion Dead Zone
#
# The input image of the KAIST dataset is input in order of [RGB, thermal],
# so each entry below lists the masking applied to (RGB, thermal)
# respectively. The available cases are:
#   original, blackout_r, blackout_t, sidesblackout_a, sidesblackout_b,
#   surroundingblackout
# ---------------------------------------------------------------------------
FDZ_case = edict({
    'original':            ["None", "None"],
    'blackout_r':          ["blackout", "None"],
    'blackout_t':          ["None", "blackout"],
    'sidesblackout_a':     ["SidesBlackout_R", "SidesBlackout_L"],
    'sidesblackout_b':     ["SidesBlackout_L", "SidesBlackout_R"],
    'surroundingblackout': ["None", "SurroundingBlackout"],
})
# ---------------------------------------------------------------------------
# Aggregate every sub-config into the single `args` object imported by the
# rest of the project.
# ---------------------------------------------------------------------------
args = edict(path=PATH,
             train=train,
             test=test,
             dataset=dataset,
             FDZ_case=FDZ_case)

# Prefer GPU when available (fixed: f-string had no placeholders).
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.exp_time = None   # filled in at run time
args.exp_name = None   # filled in at run time
args.n_classes = 3

## Semi Unpaired Augmentation
# NOTE(review): "upaired" looks like a typo for "unpaired", but the attribute
# name is kept as-is because other modules may access it by this exact name —
# confirm all call sites before renaming.
args.upaired_augmentation = ["TT_RandomHorizontalFlip",
                             "TT_FixedHorizontalFlip",
                             "TT_RandomResizedCrop"]

## Train dataset transforms
# Photometric jitter applied per modality before the paired transforms.
args["train"].img_transform = Compose([ColorJitter(0.3, 0.3, 0.3),
                                       ColorJitterLWIR(contrast=0.3)])
# Paired geometric augmentation, tensor conversion, then per-modality
# normalization ('R' = RGB, 'T' = thermal/LWIR).
args["train"].co_transform = Compose([TT_RandomHorizontalFlip(p=0.5, flip=0.5),
                                      TT_RandomResizedCrop([512, 640],
                                                           scale=(0.25, 4.0),
                                                           ratio=(0.8, 1.2)),
                                      ToTensor(),
                                      Normalize(IMAGE_MEAN, IMAGE_STD, 'R'),
                                      Normalize(LWIR_MEAN, LWIR_STD, 'T')],
                                     args=args)

## Test dataset transforms (no photometric augmentation at test time)
args["test"].img_transform = Compose([])
args["test"].co_transform = Compose([Resize(test.input_size),
                                     ToTensor(),
                                     Normalize(IMAGE_MEAN, IMAGE_STD, 'R'),
                                     Normalize(LWIR_MEAN, LWIR_STD, 'T')])