# maskrcnn_efficientnetb2b.yaml
# OTX (OpenVINO Training Extensions) recipe: Mask R-CNN instance segmentation
# with an EfficientNet-B2B backbone.
# Model, optimizer, and LR-schedule definition (jsonargparse class_path style).
model:
  class_path: otx.algo.instance_segmentation.maskrcnn.MaskRCNNEfficientNet
  init_args:
    label_info: 80  # number of classes; 80 matches the COCO default

optimizer:
  class_path: torch.optim.SGD
  init_args:
    lr: 0.007
    momentum: 0.9
    weight_decay: 0.001

scheduler:
  class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
  init_args:
    # Linear warmup for the first 100 optimizer steps, then hand over to the
    # plateau scheduler below.
    num_warmup_steps: 100
    main_scheduler_callable:
      class_path: lightning.pytorch.cli.ReduceLROnPlateau
      init_args:
        mode: max  # the monitored metric (val/map_50) improves upward
        factor: 0.1
        patience: 4
        monitor: val/map_50
# Engine / task wiring.
engine:
  task: INSTANCE_SEGMENTATION
  device: auto  # let the engine pick GPU/CPU automatically

# Metric watched by checkpointing/early-stopping callbacks.
callback_monitor: val/map_50

# Base datamodule config; keys below under `overrides` replace values from it.
data: ../_base_/data/torchvision_base.yaml
# Overrides applied on top of the base data config referenced above.
overrides:
  max_epochs: 100
  data:
    task: INSTANCE_SEGMENTATION
    config:
      stack_images: true
      data_format: coco_instances
      include_polygons: true
      train_subset:
        batch_size: 4
        num_workers: 8
        transforms:
          # Keep-ratio resize of image, boxes, and masks to fit 1024x1024.
          - class_path: otx.core.data.transform_libs.torchvision.Resize
            init_args:
              keep_ratio: true
              transform_bbox: true
              transform_mask: true
              scale:
                - 1024
                - 1024
          # Pad the keep-ratio-resized image (and masks) up to a square.
          - class_path: otx.core.data.transform_libs.torchvision.Pad
            init_args:
              pad_to_square: true
              transform_mask: true
          - class_path: otx.core.data.transform_libs.torchvision.RandomFlip
            init_args:
              prob: 0.5
              is_numpy_to_tvtensor: true
          - class_path: torchvision.transforms.v2.ToDtype
            init_args:
              dtype: ${as_torch_dtype:torch.float32}
              scale: false  # keep raw 0-255 range; Normalize below handles mean shift
          - class_path: torchvision.transforms.v2.Normalize
            init_args:
              mean: [123.675, 116.28, 103.53]
              std: [1.0, 1.0, 1.0]  # mean-subtraction only; no std scaling
        sampler:
          class_path: otx.algo.samplers.balanced_sampler.BalancedSampler
      val_subset:
        batch_size: 1
        num_workers: 4
        transforms:
          # Eval pipeline: resize/pad image only — GT boxes/masks stay untouched.
          - class_path: otx.core.data.transform_libs.torchvision.Resize
            init_args:
              keep_ratio: true
              transform_bbox: false
              transform_mask: false
              scale:
                - 1024
                - 1024
          - class_path: otx.core.data.transform_libs.torchvision.Pad
            init_args:
              pad_to_square: true
              transform_mask: false
              is_numpy_to_tvtensor: true
          - class_path: torchvision.transforms.v2.ToDtype
            init_args:
              dtype: ${as_torch_dtype:torch.float32}
              scale: false
          - class_path: torchvision.transforms.v2.Normalize
            init_args:
              mean: [123.675, 116.28, 103.53]
              std: [1.0, 1.0, 1.0]
      test_subset:
        batch_size: 1
        num_workers: 4
        transforms:
          # Test pipeline mirrors val_subset exactly.
          - class_path: otx.core.data.transform_libs.torchvision.Resize
            init_args:
              keep_ratio: true
              transform_bbox: false
              transform_mask: false
              scale:
                - 1024
                - 1024
          - class_path: otx.core.data.transform_libs.torchvision.Pad
            init_args:
              pad_to_square: true
              transform_mask: false
              is_numpy_to_tvtensor: true
          - class_path: torchvision.transforms.v2.ToDtype
            init_args:
              dtype: ${as_torch_dtype:torch.float32}
              scale: false
          - class_path: torchvision.transforms.v2.Normalize
            init_args:
              mean: [123.675, 116.28, 103.53]
              std: [1.0, 1.0, 1.0]