-
Notifications
You must be signed in to change notification settings - Fork 11
/
semantic_segmentation.py
321 lines (258 loc) · 14.1 KB
/
semantic_segmentation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
"""This module contains functions to create the extended MNIST dataset
for semantic segmentation.
"""
import matplotlib.pyplot as plt
from typing import Tuple
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import patches as mpatches
from .array_overlay import overlay_arrays
from .mnist import preprocess_mnist, download_mnist
plt.rcParams['figure.facecolor'] = 'white'
def create_semantic_segmentation_dataset(num_train_samples: int, num_test_samples: int,
                                         image_shape: Tuple[int, int] = (60, 60),
                                         min_num_digits_per_image: int = 2,
                                         max_num_digits_per_image: int = 4,
                                         num_classes: int = 10,
                                         max_iou: float = 0.2,
                                         labels_are_exclusive: bool = False,
                                         target_is_whole_bounding_box: bool = False,
                                         proportion_of_mnist: float = 1.0,
                                         ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Build the extended-MNIST semantic segmentation dataset (train and test splits).

    Downloads MNIST, preprocesses each split, then overlays digits onto blank
    canvases to produce input images and per-class segmentation targets.

    Parameters:
        num_train_samples: Number of training samples to generate.
        num_test_samples: Number of test samples to generate.
        image_shape: The (height, width) of each output image.
        min_num_digits_per_image: Lower bound (inclusive) on the number of digits
            placed on each output image.
        max_num_digits_per_image: Upper bound (inclusive) on the number of digits
            placed on each output image.
        num_classes: Integer between 1 and 10. Only digits with label < num_classes are used.
        max_iou: Maximum allowed IOU (intersection over union) between two overlaid
            digits; smaller values mean less overlap.
        labels_are_exclusive: If True, each pixel belongs to at most one class;
            if False, a pixel may belong to several digits simultaneously.
        target_is_whole_bounding_box: If True, the target for each digit covers its
            whole bounding box; if False, only the digit's non-null pixels.
        proportion_of_mnist: Fraction of the full MNIST set to preprocess. Smaller
            values slightly speed up preprocessing.

    Returns:
        train_x, train_y, test_x, test_y: inputs and targets for train and test.
    """
    (train_images, train_labels), (test_images, test_labels) = download_mnist()

    # Keyword arguments shared by the train and test generation calls.
    generation_kwargs = dict(
        image_shape=image_shape,
        min_num_digits_per_image=min_num_digits_per_image,
        max_num_digits_per_image=max_num_digits_per_image,
        num_classes=num_classes,
        max_iou=max_iou,
        labels_are_exclusive=labels_are_exclusive,
        target_is_whole_bounding_box=target_is_whole_bounding_box,
    )

    # Process both splits identically: preprocess the raw digits, then overlay.
    outputs = []
    for raw_images, raw_labels, num_samples in ((train_images, train_labels, num_train_samples),
                                                (test_images, test_labels, num_test_samples)):
        processed_images, processed_labels = preprocess_mnist(
            images=raw_images, labels=raw_labels, proportion=proportion_of_mnist,
            num_classes=num_classes, normalise=True)
        x, y = create_semantic_segmentation_data_from_digits(
            digits=processed_images, digit_labels=processed_labels,
            num_samples=num_samples, **generation_kwargs)
        outputs.extend((x, y))

    train_x, train_y, test_x, test_y = outputs
    return train_x, train_y, test_x, test_y
def create_semantic_segmentation_data_from_digits(digits: np.ndarray,
                                                  digit_labels: np.ndarray,
                                                  num_samples: int,
                                                  image_shape: tuple,
                                                  min_num_digits_per_image: int,
                                                  max_num_digits_per_image: int,
                                                  num_classes: int,
                                                  max_iou: float,
                                                  labels_are_exclusive: bool = False,
                                                  target_is_whole_bounding_box: bool = False
                                                  ) -> Tuple[np.ndarray, np.ndarray]:
    """Create extended-MNIST semantic segmentation data (one split) from digits and labels.

    Used by create_semantic_segmentation_dataset. Calling it directly is useful
    when extra preprocessing of the raw MNIST digits is wanted first (e.g.
    resizing or warping the digits).

    Parameters:
        digits: The MNIST digits, shape (num_images, height, width, 1).
        digit_labels: The MNIST labels, shape (num_images,).
        num_samples: Number of output samples to generate.
        image_shape: The (height, width) of each output image.
        min_num_digits_per_image: Lower bound (inclusive) on digits per output image.
        max_num_digits_per_image: Upper bound (inclusive) on digits per output image.
        num_classes: Integer between 1 and 10, the number of classes in the dataset.
        max_iou: Maximum allowed IOU (intersection over union) between two overlaid
            digits; smaller values mean less overlap.
        labels_are_exclusive: If True, each pixel belongs to at most one class;
            if False, a pixel may belong to several digits simultaneously.
        target_is_whole_bounding_box: If True, the target for each digit covers its
            whole bounding box; if False, only the digit's non-null pixels.

    Returns:
        (input_data, target_data): stacked input images and segmentation targets.
    """
    samples = []  # Collected (input image, segmentation target) pairs.
    canvas_shape = image_shape + (1, )
    for _ in range(num_samples):
        # Random digit count per image, inclusive on both bounds.
        digit_count = np.random.randint(
            min_num_digits_per_image, max_num_digits_per_image + 1)
        canvas, placed_digits, placed_labels, placed_boxes = overlay_arrays(
            array_shape=canvas_shape,
            input_arrays=digits,
            input_labels=digit_labels,
            max_array_value=1,
            num_input_arrays_to_overlay=digit_count,
            max_iou=max_iou)
        segmentation = create_segmentation_target(
            images=placed_digits,
            labels=placed_labels,
            bounding_boxes=placed_boxes,
            image_shape=image_shape,
            num_classes=num_classes,
            labels_are_exclusive=labels_are_exclusive,
            target_is_whole_bounding_box=target_is_whole_bounding_box)
        samples.append((canvas, segmentation))

    input_data = np.stack([pair[0] for pair in samples])
    target_data = np.stack([pair[1] for pair in samples])
    return input_data, target_data
def create_segmentation_target(images: np.ndarray,
                               labels: np.ndarray,
                               bounding_boxes: np.ndarray,
                               image_shape: tuple,
                               num_classes: int,
                               labels_are_exclusive: bool = False,
                               target_is_whole_bounding_box: bool = False
                               ) -> np.ndarray:
    """Creates the target (aka y value) based on the base images that were overlaid.

    Parameters:
        images: MNIST digits that were overlaid, shape (num_digits, height, width, 1).
        labels: Labels of the digits that were overlaid, shape (num_digits,).
        bounding_boxes: Bounding boxes (wrt output image) of the digits, one
            (xmin, ymin, xmax, ymax) row per digit.
        image_shape: The (height, width) of the output image.
        num_classes: Integer between 1 and 10. Indicating the number of classes used in the dataset.
        labels_are_exclusive: If True, each pixel can only belong to one class. If False,
            a pixel can be multiple digits at the same time.
        target_is_whole_bounding_box: If True, the target for each digit is the whole digit's
            bounding box. If False, only the non null pixels of the digit are the target values.

    Returns:
        Target for a particular input. An ndarray of shape image_shape + (num_classes,).

    Raises:
        ValueError: If images, labels and bounding_boxes do not all have the same length.
    """
    # Bug fix: the original chained comparison (a != b != c) is
    # (a != b) and (b != c), which misses mismatches where the first two
    # lengths agree but the third differs. Require all three to be equal.
    if not (len(bounding_boxes) == len(labels) == len(images)):
        raise ValueError(
            'images, labels and bounding_boxes must all have the same length. '
            f'Received lengths: {len(images)}, {len(labels)}, {len(bounding_boxes)}')

    target = np.zeros(image_shape + (num_classes,))
    if labels_are_exclusive:
        exclusivity_mask = np.zeros(image_shape)

    for i in range(len(bounding_boxes)):
        label = labels[i]
        xmin, ymin, xmax, ymax = bounding_boxes[i]
        if target_is_whole_bounding_box:
            # The whole bounding box is marked as the digit's class.
            target[ymin:ymax, xmin:xmax, [label]] = 1
        else:
            # Accumulate the digit's pixel intensities, then clip so overlapping
            # digits of the same class never exceed the brightest source value.
            max_array_value = max(target[ymin:ymax, xmin:xmax, [label]].max(), images.max())
            target[ymin:ymax, xmin:xmax, [label]] += images[i]
            np.clip(target, a_min=0, a_max=max_array_value, out=target)
        if labels_are_exclusive:
            # Zero out pixels already claimed by earlier digits, then record
            # this digit's pixels so later digits cannot claim them.
            target[..., label] = np.where(
                exclusivity_mask, 0, target[..., label])
            exclusivity_mask = np.logical_or(
                exclusivity_mask, target[..., label])
    return target
def display_grayscale_array(array: np.ndarray, title: str = '', ax: matplotlib.axes.Axes = None) -> None:
    """Display the grayscale input image.

    Parameters:
        array: Either an input digit from MNIST or an input image from the
            extended dataset; a trailing channel dimension is dropped if present.
        title: If provided, this will be added as title of the plot.
        ax: If provided, draw on this axes and do not call plt.show();
            otherwise draw on the current axes and show the figure.
    """
    # Bug fix: decide whether to show *before* defaulting ax. The original did
    # `ax = ax or plt.gca()` and then tested `if not ax:`, which was always
    # False after assignment, so plt.show() was unreachable.
    standalone = ax is None
    ax = ax or plt.gca()
    if len(array.shape) == 3:
        array = array[..., 0]
    ax.imshow(array, cmap=plt.cm.binary)
    ax.axes.set_yticks([])
    ax.axes.set_xticks([])
    if title:
        ax.set_title(title)
    if standalone:
        plt.show()
def display_segmented_image(y: np.ndarray, threshold: float = 0.5,
                            input_image: np.ndarray = None,
                            alpha_input_image: float = 0.2,
                            title: str = '',
                            ax: matplotlib.axes.Axes = None) -> None:
    """Display segmented image.

    This function displays the image where each class is shown in particular color.
    This is useful for getting a rapid view of the performance of the model
    on a few examples.

    Parameters:
        y: The array containing the prediction.
            Must be of shape (image_shape, num_classes)
        threshold: The threshold used on the predictions.
        input_image: If provided, display the input image in black.
        alpha_input_image: If an input_image is provided, the transparency of
            the input_image.
        title: If provided, this will be added as title of the plot.
        ax: If provided, draw on this axes and do not call plt.show();
            otherwise draw on the current axes and show the figure.
    """
    # Bug fix: decide whether to show *before* defaulting ax. The original did
    # `ax = ax or plt.gca()` and then tested `if not ax:`, which was always
    # False after assignment, so plt.show() was unreachable.
    standalone = ax is None
    ax = ax or plt.gca()
    # Start from a white canvas; class pixels will be painted over it.
    base_array = np.ones(
        (y.shape[0], y.shape[1], 3)) * 1
    legend_handles = []
    for i in range(y.shape[-1]):
        # Retrieve a color (without the transparency value).
        colour = plt.cm.jet(i / y.shape[-1])[:-1]
        base_array[y[..., i] > threshold] = colour
        legend_handles.append(mpatches.Patch(color=colour, label=str(i)))
    ax.imshow(base_array)
    ax.legend(handles=legend_handles, bbox_to_anchor=(1, 1), loc='upper left')
    ax.set_yticks([])
    ax.set_xticks([])
    ax.set_title(title)
    if input_image is not None:
        ax.imshow(input_image[..., 0],
                  cmap=plt.cm.binary, alpha=alpha_input_image)
    if standalone:
        plt.show()
def plot_class_masks(y_true: np.ndarray, y_predicted: np.ndarray = None, title='') -> None:
    """Plot a particular view of the true vs predicted segmentation.

    This function separates each class into its own image and
    does not perform any thresholding.

    Parameters:
        y_true: True segmentation (image_shape, num_classes).
        y_predicted: Predicted segmentation (image_shape, num_classes).
            If y_predicted is not provided, only the true values are displayed.
        title: If provided, used as the figure's suptitle.
    """
    num_rows = 2 if y_predicted is not None else 1
    num_classes = y_true.shape[-1]
    # Bug fix: squeeze=False guarantees a 2-D axes array even for a single
    # subplot; the original `axes.reshape(...)` raised AttributeError when
    # plt.subplots returned a lone Axes (num_rows == 1 and num_classes == 1).
    fig, axes = plt.subplots(num_rows, num_classes, squeeze=False,
                             figsize=(num_classes * 4, num_rows * 4))
    fig.suptitle(title)
    plt.tight_layout()
    for label in range(num_classes):
        # Top row: the true mask for this class.
        axes[0, label].imshow(y_true[..., label], cmap=plt.cm.binary)
        axes[0, label].set_yticks([])
        axes[0, label].set_xticks([])
        if label == 0:
            axes[0, label].set_ylabel('Target')
        if y_predicted is not None:
            # Bottom row: the predicted mask for this class.
            if label == 0:
                axes[1, label].set_ylabel('Predicted')
            axes[1, label].imshow(y_predicted[..., label], cmap=plt.cm.binary)
            axes[1, label].set_xlabel(f'Label: {label}')
            axes[1, label].set_yticks([])
            axes[1, label].set_xticks([])
        else:
            axes[0, label].set_xlabel(f'Label: {label}')
    plt.show()