-
Notifications
You must be signed in to change notification settings - Fork 5
/
configuration.py
162 lines (144 loc) · 5.36 KB
/
configuration.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
#!/usr/bin/env python3
"""
script including
class object with global settings
"""
from collections import namedtuple
import os
from os.path import join
# Record describing how to load a segmentation model. Field meanings:
#   name          -- identifier of the model
#   module_name   -- dotted path of the module defining the model class
#   class_name    -- name of the model class inside that module
#   kwargs        -- keyword arguments passed to the class on loading
#   model_weights -- path to the pretrained model weights
ModelConfig = namedtuple(
    "Model", "name module_name class_name kwargs model_weights"
)
# Record describing how to load a dataset. Field meanings:
#   name        -- identifier of the dataset
#   module_name -- dotted path of the module defining the dataset class
#   class_name  -- name of the dataset class inside that module
#   kwargs      -- keyword arguments passed to the class on loading
DatasetConfig = namedtuple(
    "Dataset", "name module_name class_name kwargs"
)
# Registry of available segmentation models, keyed by model name.
models = {
    "deeplabv3plus": ModelConfig(
        name="deeplabv3plus",
        module_name="src.model.deepv3",
        class_name="DeepWV3Plus",
        kwargs={"num_classes": 19},
        model_weights="/path/to/the/pretrained/deeplabv3plus/weights.pth",
    ),
}
# Registry of meta models. A meta model's constructor must accept an
# integer as its first argument (the number of input features), and the
# model's output is expected to be linear with a single node.
meta_models = {
    "meta_nn": ModelConfig(
        name="meta_nn",
        module_name="src.MetaSeg.functions.meta_nn",
        class_name="MetaNN",
        kwargs={},
        model_weights="./src/meta_nn.pth",
    ),
}
# Shared root directory of the Cityscapes splits below.
_CITYSCAPES_ROOT = "/data/datasets/semseg/Cityscapes"

# Registry of dataset configurations, keyed by the identifier that
# CONFIG uses to select train/eval datasets.
datasets = {
    "cityscapes": DatasetConfig(
        name="cityscapes",
        module_name="src.datasets.cityscapes",
        class_name="Cityscapes",
        kwargs={"root": _CITYSCAPES_ROOT, "split": "train"},
    ),
    "cityscapes_val": DatasetConfig(
        name="cityscapes_val",
        module_name="src.datasets.cityscapes",
        class_name="Cityscapes",
        kwargs={"root": _CITYSCAPES_ROOT, "split": "val"},
    ),
    "cityscapes_test": DatasetConfig(
        name="cityscapes_test",
        module_name="src.datasets.cityscapes",
        class_name="Cityscapes",
        kwargs={"root": _CITYSCAPES_ROOT, "split": "test"},
    ),
    "cityscapes_laf": DatasetConfig(
        name="cityscapes_laf",
        module_name="src.datasets.cityscapes_laf",
        class_name="CityscapesLAF",
        kwargs={
            "root": "/data/datasets/semseg/Cityscapes_lost_and_found",
            "split": "train",
        },
    ),
    "a2d2": DatasetConfig(
        name="a2d2",
        module_name="src.datasets.a2d2",
        class_name="A2D2",
        kwargs={
            "root": "/data/datasets/semseg/A2D2",
            "cam_positions": ["front_center"],
        },
    ),
    "custom_dataset": DatasetConfig(
        name="custom_dataset",
        module_name="src.datasets.custom",
        class_name="CustomDataset",
        kwargs={
            "root": "/path/to/your/image/directory",
            "image_file_extension": ".png",
        },
    ),
}
class CONFIG:
    """Global settings for the pipeline: paths, model/dataset selection,
    and task flags.

    All options are plain class attributes and are read without
    instantiating the class. NOTE(review): executing this class body
    (i.e. importing the module) creates the INPUT/METRICS/COMPONENTS
    directories on disk as a side effect.
    """

    # --------------------- #
    # set necessary paths #
    # --------------------- #
    # directory with inputs and outputs, i.e. saving and loading data
    metaseg_io_path = "/your/metaseg/input-output/path"

    # ---------------------------- #
    # paths for data preparation #
    # ---------------------------- #
    # The following path definitions are deprecated but kept for
    # compatibility reasons.
    IMG_DIR = "/data/ai_data_and_models/data/DS_20k/test/Input/"
    GT_DIR = "/data/ai_data_and_models/data/DS_20k/test/PNG_13cl/"
    PROBS_DIR = (
        "/data/ai_data_and_models/inference_results/"
        "FRRNA_Softmax_Output/nparrays/softmax/predictions/"
    )

    # ------------------ #
    # select or define #
    # ------------------ #
    meta_model_types = ["linear", "neural"]
    # one of ['one_hot_classes', 'probs']; leave as "probs"
    CLASS_DTYPE = "probs"
    # The dataset the semantic segmentation model was trained on
    TRAIN_DATASET = datasets["cityscapes"]
    DATASET = datasets["cityscapes_val"]  # used for input/output folder path
    MODEL_NAME = "deeplabv3plus"  # used for input/output folder path
    META_MODEL_NAME = "meta_nn"
    META_MODEL_TYPE = meta_model_types[1]  # "neural"

    # --------------------------------------------------------------------#
    # select tasks to be executed by setting boolean variable True/False #
    # --------------------------------------------------------------------#
    COMPUTE_METRICS = True
    VISUALIZE_RATING = False
    ANALYZE_METRICS = False

    # ----------- #
    # optionals #
    # ----------- #
    GPU_ID = 0
    NUM_CORES = 8
    # NUM_IMAGES = 500  # uncomment to only process the first 'NUM_IMAGES'
    # images of the specified dataset
    NUM_AVERAGES = 10  # only used when meta model is 'linear'
    NUM_LASSO_LAMBDAS = 40  # only used when meta model is 'linear'
    # If DATASET is set to 'a2d2' and CLASSINDEX is set to a valid integer,
    # only images containing the specified class will be processed.
    CLASSINDEX = None

    # Derived per-run directories. The trailing "/" is preserved,
    # presumably because downstream code appends file names directly to
    # these strings -- TODO confirm before removing it.
    INPUT_DIR = join(metaseg_io_path, "input", MODEL_NAME, DATASET.name) + "/"
    METRICS_DIR = join(metaseg_io_path, "metrics", MODEL_NAME, DATASET.name) + "/"
    COMPONENTS_DIR = join(metaseg_io_path, "components", MODEL_NAME, DATASET.name) + "/"
    IOU_SEG_VIS_DIR = (
        join(metaseg_io_path, "iou_seg_vis", MODEL_NAME, DATASET.name) + "/"
    )
    RESULTS_DIR = join(metaseg_io_path, "results", MODEL_NAME, DATASET.name) + "/"
    STATS_DIR = join(metaseg_io_path, "stats", MODEL_NAME, DATASET.name) + "/"
    LOG_FILE_PATH = join(metaseg_io_path, "log.txt")

    # Ensure the required output directories exist. The previous explicit
    # os.path.exists() pre-check was redundant (and race-prone) because
    # exist_ok=True already makes makedirs a no-op for existing paths.
    for p in [INPUT_DIR, METRICS_DIR, COMPONENTS_DIR]:
        os.makedirs(p, exist_ok=True)