from __future__ import division
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import Iterator
from keras.utils.np_utils import to_categorical
import keras.backend as K
def angle_difference(x, y):
"""
Calculate the minimum difference between two angles (in degrees).
"""
return 180 - abs(abs(x - y) - 180)
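# Example: the difference is taken the short way around the circle, so angles
# on either side of the 0/360 wrap stay close, e.g. angle_difference(350, 10) == 20.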
def angle_error(y_true, y_pred):
"""
Calculate the mean difference between the true angles
and the predicted angles. Each angle is represented
as a binary vector.
"""
diff = angle_difference(K.argmax(y_true), K.argmax(y_pred))
return K.mean(K.cast(K.abs(diff), K.floatx()))
def angle_error_regression(y_true, y_pred):
"""
Calculate the mean difference between the true angles
and the predicted angles. Each angle is represented
as a float number between 0 and 1.
"""
return K.mean(angle_difference(y_true * 360, y_pred * 360))
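# Both error functions can be passed straight to Keras: angle_error as a
# metric for the 360-class classification model, angle_error_regression as a
# loss or metric when the network outputs a single angle scaled to [0, 1].
# A minimal sketch (the loss and optimizer choices are illustrative):
#   model.compile(loss='categorical_crossentropy', optimizer='adam',
#                 metrics=[angle_error])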
def binarize_images(x):
"""
Convert images to the 0-1 range and binarize them: values below
0.1 become 0 and values of 0.1 and above become 1.
"""
x /= 255
x[x >= 0.1] = 1
x[x < 0.1] = 0
return x
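# The division is performed in place, so the input should already be a float
# array. Example (illustrative values):
#   x = np.array([12.75, 127.5], dtype='float32')
#   binarize_images(x)  # -> array([0., 1.], dtype=float32)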
def rotate(image, angle):
"""
Rotates an OpenCV 2 / NumPy image about its centre by the given angle
(in degrees). The returned image will be large enough to hold the entire
rotated image, with a black background.
Source: http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
# Get the image size
# No, that's not an error - NumPy stores image matrices backwards (height first)
image_size = (image.shape[1], image.shape[0])
image_center = tuple(np.array(image_size) / 2)
# Convert the OpenCV 2x3 rotation matrix to 3x3
rot_mat = np.vstack(
[cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]
)
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for the calculations below
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
(np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]
]
# Find the size of the new image
x_coords = [pt[0] for pt in rotated_coords]
x_pos = [x for x in x_coords if x > 0]
x_neg = [x for x in x_coords if x < 0]
y_coords = [pt[1] for pt in rotated_coords]
y_pos = [y for y in y_coords if y > 0]
y_neg = [y for y in y_coords if y < 0]
right_bound = max(x_pos)
left_bound = min(x_neg)
top_bound = max(y_pos)
bot_bound = min(y_neg)
new_w = int(abs(right_bound - left_bound))
new_h = int(abs(top_bound - bot_bound))
# We require a translation matrix to keep the image centred
trans_mat = np.matrix([
[1, 0, int(new_w * 0.5 - image_w2)],
[0, 1, int(new_h * 0.5 - image_h2)],
[0, 0, 1]
])
# Compute the transform for the combined rotation and translation
affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
# Apply the transform
result = cv2.warpAffine(
image,
affine_mat,
(new_w, new_h),
flags=cv2.INTER_LINEAR
)
return result
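# Example: rotating a hypothetical 640x480 image by 45 degrees returns a
# roughly 791x791 canvas containing the whole rotated frame plus black
# corners ('photo.jpg' is a placeholder path):
#   img = cv2.imread('photo.jpg')
#   rotated = rotate(img, 45)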
def largest_rotated_rect(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle within the rotated rectangle.
Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
Converted to Python by Aaron Snoswell
Source: http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
return (
bb_w - 2 * x,
bb_h - 2 * y
)
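# Sanity check: with no rotation nothing needs to be cut away, so
# largest_rotated_rect(640, 480, 0) returns the original size (640.0, 480.0).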
def crop_around_center(image, width, height):
"""
Given a NumPy / OpenCV 2 image, crops it to the given width and height,
around its centre point
Source: http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
image_size = (image.shape[1], image.shape[0])
image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))
if(width > image_size[0]):
width = image_size[0]
if(height > image_size[1]):
height = image_size[1]
x1 = int(image_center[0] - width * 0.5)
x2 = int(image_center[0] + width * 0.5)
y1 = int(image_center[1] - height * 0.5)
y2 = int(image_center[1] + height * 0.5)
return image[y1:y2, x1:x2]
def crop_largest_rectangle(image, angle, height, width):
"""
Crop around the center the largest possible rectangle
found with largest_rotated_rect.
"""
return crop_around_center(
image,
*largest_rotated_rect(
width,
height,
math.radians(angle)
)
)
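# Example: cropping away the black corners introduced by rotate(). Note that
# the original (pre-rotation) height and width are passed in, not the padded
# size of the rotated image ('img' is the same placeholder as above):
#   rotated = rotate(img, 30)
#   cropped = crop_largest_rectangle(rotated, 30, *img.shape[:2])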
def generate_rotated_image(image, angle, size=None, crop_center=False,
crop_largest_rect=False):
"""
Generate a valid rotated image for the RotNetDataGenerator. If the
image is rectangular, the crop_center option should be used to make
it square. To crop out the black borders after rotation, use the
crop_largest_rect option. To resize the final image, use the size
option.
"""
height, width = image.shape[:2]
if crop_center:
if width < height:
height = width
else:
width = height
image = rotate(image, angle)
if crop_largest_rect:
image = crop_largest_rectangle(image, angle, height, width)
if size:
image = cv2.resize(image, size)
return image
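# Example: produce a square 224x224 sample rotated by 90 degrees, cropping to
# the centre square and removing the rotation borders (the size and angle are
# illustrative):
#   sample = generate_rotated_image(img, 90, size=(224, 224),
#                                   crop_center=True, crop_largest_rect=True)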
class RotNetDataGenerator(Iterator):
"""
Given a NumPy array of images or a list of image paths,
generate batches of rotated images and rotation angles on-the-fly.
"""
def __init__(self, input, input_shape=None, color_mode='rgb', batch_size=64,
one_hot=True, preprocess_func=None, rotate=True, crop_center=False,
crop_largest_rect=False, shuffle=False, seed=None):
self.images = None
self.filenames = None
self.input_shape = input_shape
self.color_mode = color_mode
self.batch_size = batch_size
self.one_hot = one_hot
self.preprocess_func = preprocess_func
self.rotate = rotate
self.crop_center = crop_center
self.crop_largest_rect = crop_largest_rect
self.shuffle = shuffle
if self.color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', self.color_mode,
'; expected "rgb" or "grayscale".')
# check whether the input is a NumPy array or a list of paths
if isinstance(input, (np.ndarray)):
self.images = input
N = self.images.shape[0]
if not self.input_shape:
self.input_shape = self.images.shape[1:]
# add dimension if the images are greyscale
if len(self.input_shape) == 2:
self.input_shape = self.input_shape + (1,)
else:
self.filenames = input
N = len(self.filenames)
super(RotNetDataGenerator, self).__init__(N, batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
# create array to hold the images
batch_x = np.zeros((len(index_array),) + self.input_shape, dtype='float32')
# create array to hold the labels
batch_y = np.zeros(len(index_array), dtype='float32')
# iterate through the current batch
for i, j in enumerate(index_array):
if self.filenames is None:
image = self.images[j]
else:
is_color = int(self.color_mode == 'rgb')
image = cv2.imread(self.filenames[j], is_color)
if is_color:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.rotate:
# get a random angle
rotation_angle = np.random.randint(360)
else:
rotation_angle = 0
# generate the rotated image
rotated_image = generate_rotated_image(
image,
rotation_angle,
size=self.input_shape[:2],
crop_center=self.crop_center,
crop_largest_rect=self.crop_largest_rect
)
# add dimension to account for the channels if the image is greyscale
if rotated_image.ndim == 2:
rotated_image = np.expand_dims(rotated_image, axis=2)
# store the image and label in their corresponding batches
batch_x[i] = rotated_image
batch_y[i] = rotation_angle
if self.one_hot:
# convert the numerical labels to binary labels
batch_y = to_categorical(batch_y, 360)
else:
batch_y /= 360
# preprocess input images
if self.preprocess_func:
batch_x = self.preprocess_func(batch_x)
return batch_x, batch_y
def next(self):
with self.lock:
# get input data index and size of the current batch
index_array = next(self.index_generator)
# create array to hold the images
return self._get_batches_of_transformed_samples(index_array)
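# Sketch of typical usage with a list of image paths (the file list, model
# and hyper-parameters below are placeholders):
#   train_gen = RotNetDataGenerator(
#       train_filenames,
#       input_shape=(224, 224, 3),
#       batch_size=64,
#       crop_center=True,
#       crop_largest_rect=True,
#       shuffle=True
#   )
#   model.fit_generator(train_gen,
#                       steps_per_epoch=len(train_filenames) // 64)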
def display_examples(model, input, num_images=5, size=None, crop_center=False,
crop_largest_rect=False, preprocess_func=None, save_path=None):
"""
Given a model that predicts the rotation angle of an image,
and a NumPy array of images or a list of image paths, display
the specified number of example images in three columns:
Original, Rotated and Corrected.
"""
if isinstance(input, (np.ndarray)):
images = input
N, h, w = images.shape[:3]
if not size:
size = (h, w)
indexes = np.random.choice(N, num_images)
images = images[indexes, ...]
else:
images = []
filenames = input
N = len(filenames)
indexes = np.random.choice(N, num_images)
for i in indexes:
image = cv2.imread(filenames[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images.append(image)
images = np.asarray(images)
x = []
y = []
for image in images:
rotation_angle = np.random.randint(360)
rotated_image = generate_rotated_image(
image,
rotation_angle,
size=size,
crop_center=crop_center,
crop_largest_rect=crop_largest_rect
)
x.append(rotated_image)
y.append(rotation_angle)
x = np.asarray(x, dtype='float32')
y = np.asarray(y, dtype='float32')
if x.ndim == 3:
x = np.expand_dims(x, axis=3)
y = to_categorical(y, 360)
x_rot = np.copy(x)
if preprocess_func:
x = preprocess_func(x)
y = np.argmax(y, axis=1)
y_pred = np.argmax(model.predict(x), axis=1)
plt.figure(figsize=(10.0, 2 * num_images))
title_fontdict = {
'fontsize': 14,
'fontweight': 'bold'
}
fig_number = 0
for rotated_image, true_angle, predicted_angle in zip(x_rot, y, y_pred):
original_image = rotate(rotated_image, -true_angle)
if crop_largest_rect:
original_image = crop_largest_rectangle(original_image, -true_angle, *size)
corrected_image = rotate(rotated_image, -predicted_angle)
if crop_largest_rect:
corrected_image = crop_largest_rectangle(corrected_image, -predicted_angle, *size)
if x.shape[3] == 1:
options = {'cmap': 'gray'}
else:
options = {}
fig_number += 1
ax = plt.subplot(num_images, 3, fig_number)
if fig_number == 1:
plt.title('Original\n', fontdict=title_fontdict)
plt.imshow(np.squeeze(original_image).astype('uint8'), **options)
plt.axis('off')
fig_number += 1
ax = plt.subplot(num_images, 3, fig_number)
if fig_number == 2:
plt.title('Rotated\n', fontdict=title_fontdict)
ax.text(
0.5, 1.03, 'Angle: {0}'.format(true_angle),
horizontalalignment='center',
transform=ax.transAxes,
fontsize=11
)
plt.imshow(np.squeeze(rotated_image).astype('uint8'), **options)
plt.axis('off')
fig_number += 1
ax = plt.subplot(num_images, 3, fig_number)
corrected_angle = angle_difference(predicted_angle, true_angle)
if fig_number == 3:
plt.title('Corrected\n', fontdict=title_fontdict)
ax.text(
0.5, 1.03, 'Angle: {0}'.format(corrected_angle),
horizontalalignment='center',
transform=ax.transAxes,
fontsize=11
)
plt.imshow(np.squeeze(corrected_image).astype('uint8'), **options)
plt.axis('off')
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
if save_path:
plt.savefig(save_path)
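# Sketch of typical usage (the model and file list are placeholders); in a
# script, call plt.show() afterwards to display the figure:
#   display_examples(model, test_filenames, num_images=5, size=(224, 224),
#                    crop_center=True, crop_largest_rect=True)
#   plt.show()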