Skip to content

Commit

Permalink
#120 :: Moved display of intermediate images to DotToDotImage class
Browse files Browse the repository at this point in the history
  • Loading branch information
JackBuck committed Mar 22, 2017
1 parent 0b0d984 commit 87c2291
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 10 deletions.
40 changes: 34 additions & 6 deletions roboplot/dottodot/number_recognition.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,12 @@ def __init__(self, numeric_value: int, dot_location_yx: tuple):
self.dot_location_yx = dot_location_yx


class NamedImage:
    """A simple pairing of an image with a human-readable name.

    Used to label intermediate processing stages so they can be
    displayed later with a meaningful window title.
    """

    def __init__(self, image, name):
        """
        Args:
            image (np.ndarray): the image data
            name (str): a descriptive label for the image
        """
        self.image, self.name = image, name


class DotToDotImage:
"""A class to process dot-to-dot images."""

Expand Down Expand Up @@ -49,7 +55,7 @@ def __init__(self, original_img):
original_img (np.ndarray): the image to proccess
"""
self._img = original_img
self.original_image = self._img.copy()
self.intermediate_images = [NamedImage(self._img.copy(), 'Original Image')]

def process_image(self) -> Number:
"""
Expand All @@ -58,6 +64,7 @@ def process_image(self) -> Number:
Returns:
Number: the number whose spot is closest to the centre of the image
"""
self.intermediate_images = [self.intermediate_images[0]] # Just keep the original image
self._clean_image()
self._extract_spots()
self._find_closest_spot_to_centre()
Expand All @@ -72,7 +79,7 @@ def _clean_image(self):
self._img = cv2.medianBlur(self._img, ksize=3)
self._img = cv2.adaptiveThreshold(self._img, maxValue=255, adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
thresholdType=cv2.THRESH_BINARY, blockSize=11, C=2)
self.clean_image = self._img.copy()
self.intermediate_images.append(NamedImage(self._img.copy(), 'Clean Image'))

def _extract_spots(self):
# Dilate and Erode to 'clean' the spot (note that this harms the number itself, so we only do it to extract spots
Expand All @@ -93,19 +100,36 @@ def _extract_spots(self):
detector = cv2.SimpleBlobDetector_create(params)
self.spot_keypoints = detector.detect(img)

# Log intermediate image
img_with_keypoints = cv2.drawKeypoints(img, self.spot_keypoints, outImage=np.array([]), color=(0, 0, 255),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
self.intermediate_images.append(NamedImage(img_with_keypoints, 'Spot Detection Image'))

def _find_closest_spot_to_centre(self):
    """Find the detected spot nearest the image centre and highlight it.

    Sets self.centre_spot to the keypoint closest to the centre of the
    current image, or None when no spots were detected. When a centre
    spot is found, it is circled in green on the logged
    'Spot Detection Image' (if that image was logged).
    """
    if self.spot_keypoints is None or len(self.spot_keypoints) == 0:
        # No spots at all -- also skips the annotation below, which would
        # otherwise raise AttributeError on self.centre_spot.pt.
        self.centre_spot = None
        return

    # Bug fix: KeyPoint.pt is (x, y) but ndarray.shape is (rows, cols),
    # i.e. (y, x) -- reverse the shape so both operands agree on axis
    # order (the previous code mismatched axes on non-square images).
    image_centre_xy = np.array(self._img.shape[1::-1]) / 2
    self.centre_spot = min(self.spot_keypoints,
                           key=lambda spot: np.linalg.norm(spot.pt - image_centre_xy))

    try:
        spots_image = next(img.image for img in self.intermediate_images
                           if img.name == 'Spot Detection Image')
        cv2.circle(spots_image, tuple(int(i) for i in self.centre_spot.pt),
                   radius=int(self.centre_spot.size), color=(0, 255, 0), thickness=2)
    except StopIteration:
        pass  # The spot-detection image was never logged; nothing to annotate.

def _extract_central_contours(self, maximum_pixels_between_contours: float):
    """Extract the contours near the centre spot and log an annotated image.

    Args:
        maximum_pixels_between_contours (float): contours at most this far
            from the centre spot (via a chain of nearby contours) are kept.
    """
    self.central_contours = None
    if self.centre_spot is not None:
        self.central_contours = self._extract_contours_close_to(self.centre_spot.pt,
                                                                maximum_pixels_between_contours)

    # Log intermediate image. Bug fix: only draw when contours were found --
    # cv2.drawContours raises on a None contour list, which happened
    # whenever no centre spot was detected.
    img = cv2.cvtColor(self._img.copy(), cv2.COLOR_GRAY2BGR)
    if self.central_contours is not None:
        cv2.drawContours(img, self.central_contours, contourIdx=-1, color=(0, 0, 255), thickness=1)
    self.intermediate_images.append(NamedImage(img, 'Central contours'))

def _extract_contours_close_to(self, target_point, maximum_pixels_between_contours: float):
img_inverted = 255 - self._img
_, all_contours, _ = cv2.findContours(img_inverted, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
Expand Down Expand Up @@ -134,7 +158,7 @@ def dist_between_contours(cnt1, cnt2):
def _mask_using_central_contours(self):
    """Mask the image using the central contours (when any were found) and
    log the result as an intermediate image.

    If no central contours exist the image is left untouched, but a copy
    is still logged so the stage appears in the intermediate images.
    """
    contours = self.central_contours
    if contours is not None:
        self._img = self._mask_using_contours(contours)
    self.intermediate_images.append(NamedImage(self._img.copy(), 'Masked Image'))

def _mask_using_contours(self, contours):
img = self._img.copy()
Expand All @@ -144,12 +168,11 @@ def _mask_using_contours(self, contours):
return img

def _rotate_centre_spot_to_bottom_right(self):
    """Rotate the image so the centre spot sits to the bottom-right of the
    number, then log the rotated image.

    Does nothing when no centre spot was detected.
    """
    if self.centre_spot is None:
        return

    current_angle = self._estimate_degrees_from_number_centre_to_spot()
    desired_angle = -30  # target bearing of the spot relative to the number
    self._img = _rotate_image_anticlockwise(desired_angle - current_angle, self._img)
    self.intermediate_images.append(NamedImage(self._img.copy(), 'Rotated Image'))

def _estimate_degrees_from_number_centre_to_spot(self):
inverted_image = 255 - self._img
Expand Down Expand Up @@ -177,6 +200,11 @@ def _extract_number_from_recognised_text(self):
if match is not None:
self.recognised_numeric_value = int(match.group(1))

def display_intermediate_images(self):
    """Show every logged intermediate image in its own window.

    Each window is titled with the image's name; execution pauses after
    each image until a key is pressed.
    """
    for named_image in self.intermediate_images:
        cv2.imshow(winname=named_image.name, mat=named_image.image)
        cv2.waitKey(0)


def read_image(file_path: str) -> np.ndarray:
"""
Expand Down
5 changes: 1 addition & 4 deletions scripts/recognise_number.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,4 @@

# Display images
if args.display_images:
number_recognition.draw_image_with_keypoints(img.clean_image, [img.centre_spot], "Clean with centre spot")
number_recognition.draw_image_with_contours(img.clean_image, img.central_contours, "Clean with centre contours")
number_recognition.draw_image(img.masked_image, "Masked image")
number_recognition.draw_image(img.rotated_image, "Rotated image")
img.display_intermediate_images()

0 comments on commit 87c2291

Please sign in to comment.