-
Notifications
You must be signed in to change notification settings - Fork 3
/
defaults.py
320 lines (226 loc) · 7.92 KB
/
defaults.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
"""
File: defaults.py
Author: Binguan Liu
Date: Dec 14, 2020
Brief: default configurations for segmentation
"""
from yacs.config import CfgNode as CN
_C = CN()
# -------------------------------------------------------------
# Model options
# -------------------------------------------------------------
_C.MODEL = CN()
# Model architecture
_C.MODEL.ARCH = "unet"
# Encoder (backbone) architecture
_C.MODEL.ENCODER = "resnet50"
# Pretrained weights for encoder net
_C.MODEL.ENCODER_WEIGHTS = ""
# Activation function applied to the model output
_C.MODEL.ACT_FUNC = "softmax"
# The path of pretrained model
_C.MODEL.PRETRAINED = ""
# The number of classes to predict
_C.MODEL.NUM_CLASSES = 8
# Mode : binary, multiclass or multilabel
_C.MODEL.MODE = "binary"
# The number of input channels
_C.MODEL.INPUT_CHANNELS = 3
# Rate of dropout (negative value disables dropout)
_C.MODEL.DROPOUT = -1.0
# -------------------------------------------------------------
# Dataset options
# -------------------------------------------------------------
_C.DATA = CN()
# Dataset name
_C.DATA.NAME = "retinal-lesions"
# The root directory of dataset
_C.DATA.DATA_ROOT = ""
# The label values of pixels in the mask
_C.DATA.LABEL_VALUES = [255]
# Available for retinal-lesions dataset.
# If true, convert the data setting to binary classification
_C.DATA.BINARY = False
# If true, get and output the region size as well
_C.DATA.REGION_SIZE = False
# If true, get and output the region size normalized by the area as well
_C.DATA.NORMALIZE_REGION_SIZE = False
# If true, get and output the region number as well
_C.DATA.REGION_NUMBER = False
# The mean value of the raw pixels across the R G B channels.
_C.DATA.MEAN = [0.485, 0.456, 0.406]
# The std value of the raw pixels across the R G B channels.
_C.DATA.STD = [0.229, 0.224, 0.225]
# The target size of image resize
_C.DATA.RESIZE = [512, 512]
# How many subprocesses to use for data loading.
_C.DATA.NUM_WORKERS = 8
# For the retinal-lesion-class setting
_C.DATA.CLASS_NAME = "hard_exudate"
# For the polyp dataset setting (e.g. Kvasir)
_C.DATA.SET_NAME = "Kvasir"
# -------------------------------------------------------------
# Loss options
# -------------------------------------------------------------
_C.LOSS = CN()
# Name of loss function
_C.LOSS.NAME = "bce_logit"
# Hyper parameter of loss
_C.LOSS.ALPHA = 0.1
# Step size of adjusting hyper weight
# If zero, it won't change the weight during the training
_C.LOSS.ALPHA_STEP_SIZE = 0
# Factor of increasing hyper weight when it triggers adjusting
_C.LOSS.ALPHA_FACTOR = 5.0
# Temperature
_C.LOSS.TEMP = 1.0
# Label smoothing for soft bce loss
_C.LOSS.LABEL_SMOOTHING = 0.1
# The target value that is ignored and does not contribute to param optimization
_C.LOSS.IGNORE_INDEX = 255
# For some losses, the background index is required
_C.LOSS.BACKGROUND_INDEX = -1
# Weighting for different classes if necessary (empty list = unweighted)
_C.LOSS.CLASS_WEIGHTS = []
# -------------------------------------------------------------
# Optimizer options
# -------------------------------------------------------------
_C.SOLVER = CN()
# Optimization method
_C.SOLVER.OPTIMIZING_METHOD = "adam"
# Base learning rate
_C.SOLVER.BASE_LR = 0.1
# Minimum learning rate (triggered for some schedulers)
_C.SOLVER.MIN_LR = 0.0
# Learning rate policy
_C.SOLVER.LR_POLICY = "reduce_on_plateau"
# Available for ReduceLROnPlateau
_C.SOLVER.FACTOR = 0.5
_C.SOLVER.PATIENCE = 3
_C.SOLVER.REDUCE_MODE = "min"
# Momentum.
_C.SOLVER.MOMENTUM = 0.9
# Momentum dampening.
_C.SOLVER.DAMPENING = 0.0
# Nesterov momentum.
_C.SOLVER.NESTEROV = True
# Exponential decay factor.
_C.SOLVER.GAMMA = 0.1
# L2 regularization.
_C.SOLVER.WEIGHT_DECAY = 1e-4
# Step size for 'exp' and 'cos' policies (in epochs).
_C.SOLVER.STEP_SIZE = 10
# Maximal number of epochs.
_C.SOLVER.MAX_EPOCH = 300
# Number of warmup epochs.
_C.SOLVER.WARMUP_EPOCH = 0
# -------------------------------------------------------------
# Training options
# -------------------------------------------------------------
_C.TRAIN = CN()
# Total mini-batch size.
_C.TRAIN.BATCH_SIZE = 4
# Train data list path (a relative path to _C.DATA.DATA_ROOT or an absolute path)
_C.TRAIN.DATA_PATH = ""
# Evaluate model on test data every eval period epochs.
_C.TRAIN.EVAL_PERIOD = 1
# Save model checkpoint every period epochs.
_C.TRAIN.CHECKPOINT_PERIOD = 1
# Save model checkpoint after period epochs.
_C.TRAIN.CHECKPOINT_AFTER_PERIOD = 5
# If True, calculate metric (auc/F1/dice/...) in training phase.
# May be very costly due to the large size of training samples
_C.TRAIN.CALCULATE_METRIC = False
# Resume training from the latest checkpoint in the output directory
_C.TRAIN.AUTO_RESUME = False
# -------------------------------------------------------------
# Validation options
# -------------------------------------------------------------
_C.VAL = CN()
# Total mini-batch size.
_C.VAL.BATCH_SIZE = 4
# Val data list path (a relative path to _C.DATA.DATA_ROOT or an absolute path)
_C.VAL.DATA_PATH = ""
# -------------------------------------------------------------
# Test options (Only available when running test script)
# -------------------------------------------------------------
_C.TEST = CN()
# Test data list path (a relative path to _C.DATA.DATA_ROOT or an absolute path)
_C.TEST.DATA_PATH = ""
# The split name
_C.TEST.SPLIT = "test"
# Total mini-batch size.
_C.TEST.BATCH_SIZE = 4
# The path of the testing checkpoint file.
# If empty, it will load model indicated in the best_checkpoint file
_C.TEST.CHECKPOINT_PATH = ""
# If True, it will load model indicated in the best_checkpoint file
_C.TEST.BEST_CHECKPOINT = False
# The model to be tested indexed by epoch (start from 1)
_C.TEST.MODEL_EPOCH = 0
# If True, it will save the predicted results into one numpy array file
_C.TEST.SAVE_PREDICTS = False
# -------------------------------------------------------------
# Transductive options (Only available when running transductive script)
# -------------------------------------------------------------
_C.TRANSDUCTIVE = CN()
# If true, employ ground-truth labels
_C.TRANSDUCTIVE.USE_LABEL = True
# Hyper parameter of loss
_C.TRANSDUCTIVE.ALPHA = 1.0
# Optimization method
_C.TRANSDUCTIVE.OPTIMIZING_METHOD = "sgd"
# Base learning rate
_C.TRANSDUCTIVE.BASE_LR = 0.1
# Maximum number of iterations
_C.TRANSDUCTIVE.MAX_ITER = 50
# Momentum.
_C.TRANSDUCTIVE.MOMENTUM = 0.9
# Momentum dampening.
_C.TRANSDUCTIVE.DAMPENING = 0.0
# Nesterov momentum.
_C.TRANSDUCTIVE.NESTEROV = True
# Exponential decay factor.
_C.TRANSDUCTIVE.GAMMA = 0.1
# L2 regularization.
_C.TRANSDUCTIVE.WEIGHT_DECAY = 1e-4
# If true, enable verbose output
_C.TRANSDUCTIVE.VERBOSE = False
# -------------------------------------------------------------
# Wandb(https://wandb.ai/) : Experiment management platform
# -------------------------------------------------------------
_C.WANDB = CN()
# If true, log experiments to Weights & Biases
_C.WANDB.ENABLE = False
# Wandb project name
_C.WANDB.PROJECT = "iccv2021"
# Wandb entity (user or team) name
_C.WANDB.ENTITY = "newton"
# -------------------------------------------------------------
# Misc options
# -------------------------------------------------------------
# Output basedir.
_C.OUTPUT_DIR = "./output"
# Set seed to negative to fully randomize everything.
# Set seed to positive to use a fixed seed. Note that a fixed seed increases
# reproducibility but does not guarantee fully deterministic behavior.
# Disabling all parallelism further increases reproducibility.
_C.RNG_SEED = -1
# Benchmark different cudnn algorithms.
# If input images have very different sizes, this option will have large overhead
# for about 10k iterations. It usually hurts total time, but can benefit for certain models.
# If input images have the same or similar sizes, benchmark is often helpful.
_C.CUDNN_BENCHMARK = False
# Log period in iters
_C.LOG_PERIOD = 10
# If True, log the model info.
_C.LOG_MODEL_INFO = True
# The device name
_C.DEVICE = "cuda:0"
# If True, perform test after training
_C.PERFORM_TEST = False
# Threshold for determining the positive segmentation results
_C.THRES = 0.5
def get_cfg() -> CN:
    """Return a fresh copy of the default configuration node.

    Cloning keeps the module-level defaults immutable from the
    caller's perspective: mutations on the returned node do not
    leak back into `_C`.
    """
    return _C.clone()