@@ -0,0 +1,164 @@
import pyrebase
from datetime import datetime

# Configure Firebase
# NOTE(review): credentials are hardcoded in source — consider moving the
# API key out of version control before publishing this code.
config = {
    "apiKey": "AIzaSyDyTulUCX5swJauYm8YM55Vr5a5vv8ipPQ",
    "authDomain": "rubiksscanner.firebaseapp.com",
    "databaseURL": "https://rubiksscanner.firebaseio.com",
    "storageBucket": "rubiksscanner.appspot.com"
}

# Get database — module-level handle used by every function below.
firebase = pyrebase.initialize_app(config)
db = firebase.database()

# Used to compute the average number of seconds to complete each round
def computeAvgSeconds(comp_id, times):
    """Store each round's time in the database and return the trimmed mean.

    Each entry of ``times`` is a string formatted ``MM:SS.mmm`` (either ``.``
    or ``:`` may separate the fields).  The single best and single worst
    rounds are dropped and the average of the rest (in seconds) is returned.

    :param comp_id: competitor id used as the database key
    :param times: list of round-time strings (normally 5 of them)
    :return: average of the remaining times, in seconds (float)
    """
    all_times = []
    for index, time_str in enumerate(times):
        # Rounds are stored 1-based in the database.  ("round_number" avoids
        # shadowing the builtin round() used by the original code.)
        round_number = index + 1
        db.child("EventName").child("Competitors").child(comp_id).child(round_number).set(time_str)

        # Normalize the separator so "MM:SS.mmm" and "MM:SS:mmm" both parse.
        minutes, seconds, milliseconds = time_str.replace(".", ":").split(":")
        total_seconds = float(minutes) * 60 + float(seconds) + float(milliseconds) * 0.001

        # add each time to list
        all_times.append(total_seconds)

    # remove max and min time from list
    all_times.remove(max(all_times))
    all_times.remove(min(all_times))

    # Average the remaining rounds.  (Previously hardcoded /3, which silently
    # assumed exactly five input times.)
    return sum(all_times) / len(all_times)


# Used to convert average time back to string format
def convertTimeFormat(average_time):
    """Convert a time in seconds (float) to an "MM:SS.mmm" string.

    Fixes a rounding bug in the original: when the fractional part rounded up
    to 1.000 (e.g. 59.9999 s) the carry was dropped and "00:59.000" was
    produced instead of "01:00.000".

    :param average_time: non-negative time in seconds
    :return: zero-padded "MM:SS.mmm" string
    """
    # Work in integer milliseconds so rounding carries propagate correctly.
    total_ms = int(round(average_time * 1000))
    minutes, remainder_ms = divmod(total_ms, 60000)
    seconds, milliseconds = divmod(remainder_ms, 1000)
    return "%02d:%02d.%03d" % (minutes, seconds, milliseconds)


# Add info to database given id of competitor and array of 5 strings representing
# the times it took to complete each round
def addInfoToDatabase(comp_id, times, flagged):
    """Write a competitor's results (and flagged digits) to the database.

    :param comp_id: competitor id used as the database key
    :param times: list of 5 round-time strings ("MM:SS.mmm")
    :param flagged: list of 6 lists of digit-position strings; flagged[0]
        flags the id row, flagged[1..5] flag rounds 1..5
    """
    seconds = computeAvgSeconds(comp_id, times)
    print(seconds)

    avg_time = convertTimeFormat(seconds)
    print(avg_time)

    # Serialize each flag list as a comma-terminated string ("" when empty),
    # matching the original storage format ("a,b," etc.).
    flag_strings = ["".join(digit + "," for digit in flags) for flags in flagged]

    # add the average time and average seconds to database
    db.child("EventName").child("Competitors").child(comp_id).child("seconds").set(seconds)
    db.child("EventName").child("Competitors").child(comp_id).child("avg").set(avg_time)

    # Store the flags.  BUG FIX: the original wrote flagged_1 to child(2) and
    # then immediately overwrote it with flagged_2, so round 1's flags were
    # lost; rounds are now written to children 1..5.
    db.child("EventName").child("Competitors").child(comp_id).child("Flagged").child("ID").set(flag_strings[0])
    for round_number in range(1, 6):
        db.child("EventName").child("Competitors").child(comp_id).child("Flagged").child(round_number).set(flag_strings[round_number])


# Return Dictionary of competitors organized in order of average completion time
def getWinners():
    """Print and return competitors ordered by average solve time.

    Re-computes each competitor's average from the stored round-time strings
    and, when the stored value is stale, updates the database before ranking.

    :return: list of (competitor_id, average_seconds) tuples, ascending
    """
    # dictionary of competitors: id -> average seconds
    winners_dict = {}
    competitors = db.child("EventName").child("Competitors").get()

    # add competitor id/times to dictionary and check if times are correct
    for c in competitors.each():
        # Only dict-shaped children are competitor records; skip leaf values.
        if type(c.val()) is dict:
            comp_id = str(c.key())
            seconds = c.val()['seconds']
            round_1 = c.val()['1']
            round_2 = c.val()['2']
            round_3 = c.val()['3']
            round_4 = c.val()['4']
            round_5 = c.val()['5']
            times = [round_1, round_2, round_3, round_4, round_5]
            new_seconds = computeAvgSeconds(comp_id, times)
            # check to see if round data has been changed
            # NOTE(review): exact float comparison — this only works because
            # both values derive from the same stored strings; verify.
            if (new_seconds == seconds):
                winners_dict[comp_id] = seconds
            else:
                # if data changed, update average times in database
                print("in else for id" + comp_id)
                winners_dict[comp_id] = new_seconds
                new_average = convertTimeFormat(new_seconds)
                db.child("EventName").child("Competitors").child(comp_id).child("seconds").set(new_seconds)
                db.child("EventName").child("Competitors").child(comp_id).child("avg").set(new_average)

    # sort dictionary by average seconds (fastest first)
    ordered_winners = sorted(winners_dict.items(), key=lambda x: x[1])

    print("Winners")  # Print winners

    place = 1
    for key, value in ordered_winners:
        # get average time for each competitor from database
        time = db.child("EventName").child("Competitors").child(str(key)).child('avg').get()
        print(str(place) + ". Competitor id " + str(key) + ": " + time.val())
        place += 1

    return ordered_winners

#times = ["01:23:456", "02:04:818", "05:04:321", "03:14:888", "01:52:582"]
#flagged = [[], [], ["6"], [], [], ["1"]]

#addInfoToDatabase("880", times, flagged)
#getWinners()
@@ -0,0 +1,172 @@
# Steven Stetzler
# Project 2
# panorama_stitching.py

import cv2
from matplotlib import pyplot as plt
import numpy as np
import skimage, skimage.io, scipy.ndimage.filters, math, scipy.signal, skimage.color, skimage.feature, random


# This function applies a homography to the coordinate (x, y), producing (x_h, y_h)
def apply_homography(h, x, y):
    """Map the point (x, y) through the 3x3 homography ``h``.

    Returns the transformed (x, y) after the perspective divide.  Note the
    denominator uses an implicit h[2, 2] of 1, so ``h`` must be normalized.
    """
    numer_x = h[0, 0] * x + h[0, 1] * y + h[0, 2]
    numer_y = h[1, 0] * x + h[1, 1] * y + h[1, 2]
    denom = h[2, 0] * x + h[2, 1] * y + 1
    return numer_x / denom, numer_y / denom


# This function takes in a list of 4 points (x_a, y_a) from image A and 4 points (x_b, y_,) from image B
# and fits a homography exactly to those point
def fit_homography(points_a, points_b):
    """Fit a 3x3 homography H that maps points_b onto points_a.

    Builds the standard 8-parameter DLT linear system (H[2, 2] fixed to 1)
    from the four point correspondences and solves it by least squares.

    :param points_a: four (x, y) pairs in image A
    :param points_b: four (x, y) pairs in image B
    :return: 3x3 numpy array H with H[2, 2] == 1
    """
    A = np.zeros([8, 8])
    b = np.zeros(8)
    for i in range(0, 4):
        # Rows 0..3 constrain the x coordinates, rows 4..7 the y coordinates.
        A[i, 0] = points_b[i][0]
        A[i, 1] = points_b[i][1]
        A[i, 2] = 1
        A[i, 6] = -points_a[i][0] * points_b[i][0]
        A[i, 7] = -points_a[i][0] * points_b[i][1]
        A[i + 4, 3] = points_b[i][0]
        A[i + 4, 4] = points_b[i][1]
        A[i + 4, 5] = 1
        A[i + 4, 6] = -points_a[i][1] * points_b[i][0]
        A[i + 4, 7] = -points_a[i][1] * points_b[i][1]
        b[i] = points_a[i][0]
        b[i + 4] = points_a[i][1]
    # rcond=None opts into NumPy's current default and silences the
    # FutureWarning the bare call emitted.
    params = np.linalg.lstsq(A, b, rcond=None)[0]
    # Pack the 8 solved parameters row-major into H, fixing H[2, 2] = 1.
    H = np.ones(9)
    H[:8] = params
    return H.reshape(3, 3)


# This function returns the images a and a stitched and blended together based on the homography H
def composite_warped(a, b, H):
    """Warp image b into a's coordinate frame with H and feather-blend the
    overlap using distance transforms.

    :param a: base image (RGB)
    :param b: image to warp; H maps b coordinates to a coordinates
    :param H: 3x3 homography
    :return: blended panorama as a ubyte image, twice a's width
    """
    # "Warp images a and b to a's coordinate system using the homography H which maps b coordinates to a coordinates."
    out_shape = (a.shape[0], 2 * a.shape[1])  # Output image (height, width)
    p = skimage.transform.ProjectiveTransform(np.linalg.inv(H))  # Inverse of homography (used for inverse warping)
    bwarp = skimage.transform.warp(b, p, output_shape=out_shape)  # Inverse warp b to a coords
    plt.imshow(bwarp)
    bvalid = np.zeros(b.shape, 'uint8')  # Establish a region of interior pixels in b
    bvalid[1:-1, 1:-1, :] = 255
    bmask = skimage.transform.warp(bvalid, p, output_shape=out_shape)  # Inverse warp interior pixel region to a coords
    avalid = np.zeros(a.shape, 'uint8')
    avalid[1:-1, 1:-1, :] = 255
    apad = np.hstack((skimage.img_as_float(a), np.zeros(a.shape)))  # Pad a with black pixels on the right
    # Compute the distance transform of the padded A and B images
    # NOTE(review): scipy.ndimage.morphology is deprecated in modern SciPy;
    # these functions live at scipy.ndimage.distance_transform_edt now.
    dist_a = scipy.ndimage.morphology.distance_transform_edt(skimage.color.rgb2gray(apad))
    dist_b = scipy.ndimage.morphology.distance_transform_edt(skimage.color.rgb2gray(bmask))
    # Start with b's warped pixels where the warped interior mask is exactly
    # 1.0, and a's padded pixels everywhere else.
    out_image = np.where(bmask == 1.0, bwarp, apad)
    for y in range(0, out_shape[0]):
        for x in range(0, out_shape[1]):
            if (dist_a[y, x] != 0) and (dist_b[y, x] != 0):
                # Compute the alpha value at each area in the overlapping region based on the distance
                # values of each pixel in A and B
                # The alpha determines how much of each image (the background or the foreground) should contribute
                # to the final image
                alpha = dist_a[y, x] / (dist_a[y, x] + dist_b[y, x])
                # Blend the background and the foreground together using the alpha computed
                out_image[y, x] = alpha * apad[y, x] + (1 - alpha) * out_image[y, x]
    return skimage.img_as_ubyte(out_image)


# Use the RANSAC algorithm to find the best homography to stitch the two images images together
def get_best_homography(good, keypoints_a, keypoints_b):
    """RANSAC: repeatedly fit a homography to four random matches and keep
    the candidate with the most inliers.

    :param good: list of single-element DMatch lists (ratio-test survivors)
    :param keypoints_a: keypoints of the query image (indexed by queryIdx)
    :param keypoints_b: keypoints of the train image (indexed by trainIdx)
    :return: best-scoring 3x3 homography, or None if no candidate had inliers
    """
    best_match_inlier_count = 0
    best_match = None
    # Pick a large amount of iterations to ensure convergence on the best match
    for _ in range(0, 1400):
        # Get four random corresponding features
        random_four_matches = random.sample(good, 4)
        a = []
        b = []
        for i in range(0, 4):
            a.append([keypoints_a[random_four_matches[i][0].queryIdx].pt[0],
                      keypoints_a[random_four_matches[i][0].queryIdx].pt[1]])
            b.append([keypoints_b[random_four_matches[i][0].trainIdx].pt[0],
                      keypoints_b[random_four_matches[i][0].trainIdx].pt[1]])
        # Fit a homography to the four (x, y) points in image a and image b
        homography = fit_homography(a, b)
        # Loop through all good matches and determine if this homography is good by computing the number of
        # corresponding features which are close to where they should be once the homography is applied
        # counting the number of inliers
        inliers = 0
        for match in good:
            x_a, y_a = keypoints_a[match[0].queryIdx].pt
            x_b, y_b = keypoints_b[match[0].trainIdx].pt
            x_b_hom, y_b_hom = apply_homography(homography, x_b, y_b)
            # Compute the distance from the transformed coordinate (H applied to feature in B) to the coordinate of
            # that feature in A
            dist = math.sqrt((x_a - x_b_hom) ** 2 + (y_a - y_b_hom) ** 2)
            # NOTE(review): 0.01 px is an extremely strict inlier threshold —
            # confirm this was intended (typical values are a few pixels).
            if dist < 0.01:
                inliers += 1
        if inliers > best_match_inlier_count:
            best_match_inlier_count = inliers
            best_match = homography
    print(('Found', best_match_inlier_count, 'inliers.'))
    print('Best Homography:')
    print(best_match)
    return best_match


print('Reading images.')
image_a = cv2.imread("test_images\\close_center.jpg")
image_b = cv2.imread("test_images\\template_inside.png")

# Use the SIFT descriptor to find keypoint features in the left and right images
print('Finding features in both images.')
sift = cv2.xfeatures2d.SIFT_create()
# Initiate STAR detector
orb = cv2.ORB_create()
keypoints_a = orb.detect(image_a, None)
keypoints_b = orb.detect(image_b, None)
keypoints_a, descriptor_a = orb.compute(image_a, keypoints_a)
keypoints_b, descriptor_b = orb.compute(image_b, keypoints_b)

# BUG FIX: the original passed the positional output-image argument (None)
# after the keyword arguments, which is a SyntaxError; the out-image argument
# must come third, before any keywords.
img2 = cv2.drawKeypoints(image_a, keypoints_a, None, color=(0, 255, 0), flags=0)
img3 = cv2.drawKeypoints(image_b, keypoints_b, None, color=(255, 0, 0), flags=0)

plt.imsave("orb_center.png", img2)
plt.imsave('orb_template.png', img3)

# #keypoints_a, descriptor_a = sift.detectAndCompute(image_a, None)
# #keypoints_b, descriptor_b = sift.detectAndCompute(image_b, None)
#
# # Compare the features in both images. For each feature in the first image,
# # find the closest matching feature in the other image
# print('Computing matching features.')
# bf = cv2.BFMatcher()
# matches = bf.knnMatch(descriptor_a, descriptor_b, k=2)
#
# # Apply the ratio test to remove matches which are too similar to one another
# # Keeping only the unique matches
# print('Applying ratio test to matches.')
# good = []
# for m, n in matches:
#     if m.distance < 0.7 * n.distance:
#         good.append([m])
#
# # Find the best homography to stitch the two images together
# print('Computing best homography.')
# best_homography = get_best_homography(good, keypoints_a, keypoints_b)
#
# p = skimage.transform.ProjectiveTransform(best_homography)
# bwarp = skimage.transform.warp(image_a, p, output_shape=(500, 500))
# plt.imsave("warped.png", bwarp)
#
# # Stitch the images
# #print 'Stitching images.'
# #image_out = composite_warped(image_a, image_b, best_homography)
# #print 'Saving stitched image to: stitched.png'
# #plt.imsave("stitched.png", image_out[:, :, ::-1])
@@ -0,0 +1,262 @@
import urllib.request
import numpy as np
import sys
import skimage.transform
import skimage.filters
import cv2
from keras import backend as K
from keras.models import load_model
from rubiks_database import getWinners, addInfoToDatabase
from skimage import img_as_ubyte


def get_scorecard_sift(image, template):
    """Locate the scorecard in `image` by SIFT-matching against `template`
    and return the image warped into the template's frame, or None.

    :param image: grayscale query frame (its shape is unpacked as (h, w))
    :param template: template image (its shape is unpacked as (h, w, _), so
        it is expected to be 3-channel — TODO confirm against caller)
    :return: `image` warped by the found homography, or None when fewer than
        MIN_MATCH_COUNT good matches were found
    """
    MIN_MATCH_COUNT = 10

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(image, None)
    kp2, des2 = sift.detectAndCompute(template, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.5 * n.distance:
            good.append(m)

    print("# Matches: ", len(good))

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # RANSAC homography mapping frame coordinates onto template coordinates.
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        print(M)
        h, w = image.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        # NOTE(review): this draws the detected outline onto `template` in place.
        img2 = cv2.polylines(template, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

        h_2, w_2, _ = img2.shape
        adjusted_image = cv2.warpPerspective(image, M, (w_2, h_2))
        # cv2.imshow("Warped", adjusted_image)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        return adjusted_image
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None
        return None


def predict_digit(image):
    """Classify a digit image with the trained CNN.

    :param image: 2D digit image (any size; resized to 28x28)
    :return: (predicted digit as returned by argmax, confidence in [0, 1])
    """
    # input image dimensions
    img_rows, img_cols = 28, 28
    image = skimage.transform.resize(image, (img_cols, img_rows), mode='constant')
    if K.image_data_format() == 'channels_first':
        image = image.reshape(1, img_rows, img_cols)
    else:
        image = image.reshape(img_rows, img_cols, 1)

    # PERF FIX: cache the model on the function object — the original
    # re-loaded the .h5 file from disk on every single prediction.
    if not hasattr(predict_digit, "_model"):
        predict_digit._model = load_model('CNN\\new_model.h5')
    model = predict_digit._model

    prediction = model.predict(np.asarray([image]))[0]
    which_digit = np.argmax(prediction)
    confidence = np.max(prediction)
    return which_digit, confidence


def get_id_from_scorecard(image):
    """Cut the three competitor-id digit cells out of a rectified scorecard.

    :param image: grayscale scorecard image aligned to the template
    :return: list of three binarized (float32) digit crops
    """
    # Adaptive local threshold: dark ink becomes True (then 1.0).
    binary = (image < skimage.filters.threshold_local(image, 101)).astype("float32")
    # The id row occupies a fixed y band; digit columns are 54 px apart.
    return [binary[134:177, 43 + 54 * col:90 + 54 * col] for col in range(3)]


def get_row_of_digits_from_scorecard(image, row_num):
    """Extract the seven digit cells for one solve-time row of the scorecard.

    For each of the 7 fixed-position cells, the largest contours are checked
    for a digit-sized bounding box; found digits are cropped, resized onto a
    centered 28x28 canvas (MNIST layout), and also saved to disk as
    presentation images.

    :param image: grayscale scorecard image aligned to the template
    :param row_num: 1-based row (round) number
    :return: list of 28x28 uint8 digit images, with None for empty cells
    """
    # Rows are laid out 49 px apart; make the row index 0-based.
    adjusted_row_num = row_num - 1
    # print("shape", image.shape)
    # Adaptive local threshold: dark ink becomes True.
    bw = image < skimage.filters.threshold_local(image, 101)
    # plt.imshow(bw), plt.show()
    cv2.imwrite('presentation_images\\bw_digits.png', img_as_ubyte(bw))

    digits = []
    for column in range(0, 7):
        # Fixed cell geometry: 54 px column pitch, 49 px row pitch.
        min_y = 233 + 49 * adjusted_row_num
        max_y = 273 + 49 * adjusted_row_num
        min_x = 43 + 54 * column
        max_x = 90 + 54 * column
        digit_not_bw = image[min_y:max_y, min_x:max_x]
        digit = bw[min_y:max_y, min_x:max_x]

        # NOTE(review): cnts[1] matches the OpenCV 3.x findContours return
        # signature (image, contours, hierarchy); 2.x/4.x return contours at
        # index 0 — confirm the installed OpenCV version.
        cnts = cv2.findContours(img_as_ubyte(digit.copy()), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[1]
        digitCnts = []
        digit_draw = digit_not_bw.copy()
        # Keep only the two largest candidate contours.
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:2]
        # loop over the digit area candidates
        added = False
        for c in cnts:
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)
            # if the contour is sufficiently large, it must be a digit
            if not ((40 >= h >= 12) and (40 >= w >= 3)):
                continue
            else:
                added = True
                digitCnts.append(c)
                cv2.rectangle(digit_draw, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
                digit_crop = digit[y:y+h, x:x+w]
                # Scale the crop so its longer side becomes 20 px (MNIST style).
                resize_ratio = min(20./w, 20./h)
                resize_width = int(resize_ratio * w)
                resize_height = int(resize_ratio * h)
                digit_resized = skimage.transform.resize(digit_crop, (resize_height, resize_width), mode='constant')
                # Center the resized digit on a black 28x28 canvas.
                digit_28_28 = np.zeros((28, 28), dtype=float)
                lower_bound_y = int((28 - resize_height)/2)
                upper_bound_y = int(resize_height + (28 - resize_height)/2)
                lower_bound_x = int((28 - resize_width)/2)
                upper_bound_x = int(resize_width + (28 - resize_width)/2)
                digit_28_28[lower_bound_y:upper_bound_y, lower_bound_x:upper_bound_x] = digit_resized
                # Save the intermediate stages for the presentation write-up.
                cv2.imwrite('presentation_images\\digits\\' + str(row_num) + '_' + str(column + 1) + '.png',
                            img_as_ubyte(digit_not_bw))
                cv2.imwrite('presentation_images\\digits\\box_' + str(row_num) + '_' + str(column + 1) + '.png',
                            img_as_ubyte(digit_draw))
                cv2.imwrite('presentation_images\\digits\\bw_' + str(row_num) + '_' + str(column + 1) + '.png',
                            img_as_ubyte(digit_crop))
                cv2.imwrite('presentation_images\\digits\\bw_28_' + str(row_num) + '_' + str(column + 1) + '.png',
                            img_as_ubyte(digit_28_28))
                digits.append(img_as_ubyte(digit_28_28))
        if not added:
            # No digit-sized contour in this cell: record an empty cell.
            digits.append(None)

    return digits


def construct_id(digit_images):
    """Predict the 3-digit competitor id from its digit crops.

    :param digit_images: iterable of three 28x28 digit images
    :return: (id string, list of low-confidence digit positions as strings)
    """
    digits = []
    digit_flags = []
    # enumerate() replaces the hand-maintained counter of the original.
    for i, digit in enumerate(digit_images):
        predicted_digit, confidence = predict_digit(digit)
        print("Predicted:", predicted_digit, "with confidence", 100 * confidence)
        digits.append(predicted_digit)
        # Flag any digit the CNN is less than 75% sure about for review.
        if confidence < 0.75:
            digit_flags.append(str(i))
    comp_ip = str(digits[0]) + str(digits[1]) + str(digits[2])
    return comp_ip, digit_flags


def construct_time(digit_images):
    """Predict a round time "MM:SS:mmm" from its seven digit crops.

    Cells where no digit contour was found arrive as None and are read as 0
    (and flagged, since their confidence is 0).

    :param digit_images: iterable of seven 28x28 digit images (or None)
    :return: (time string, list of low-confidence digit positions as strings)
    """
    digits = []
    digit_flags = []
    # enumerate() replaces the hand-maintained counter of the original.
    for i, digit in enumerate(digit_images):
        if digit is None:
            # Empty cell: treat as 0; confidence 0 ensures it gets flagged.
            predicted_digit = 0
            confidence = 0.
        else:
            predicted_digit, confidence = predict_digit(digit)
            print("Predicted:", predicted_digit, "with confidence", 100 * confidence)
        digits.append(predicted_digit)
        if confidence < 0.5:
            digit_flags.append(str(i))
    time = str(digits[0]) + str(digits[1]) + ":" + str(digits[2]) + str(digits[3]) + ":" + str(digits[4]) + str(
        digits[5]) + str(digits[6])
    return time, digit_flags


def start_video_stream(host):
    """Read an MJPEG stream from `host`, scan scorecards, and upload results.

    Every 30th frame is SIFT-matched against the scorecard template; when a
    card is found, the competitor id and five round times are OCR'd and
    pushed to the database, then the standings are printed.

    :param host: "ip:port" of the MJPEG server (overridden by sys.argv[1])
    """
    if len(sys.argv) > 1:
        host = sys.argv[1]

    hoststr = 'http://' + host + '/video?x.mjpeg'
    print('Streaming ' + hoststr)

    stream = urllib.request.urlopen(hoststr)

    template = cv2.imread('test_images\\template_new.png')
    bytes_from_stream = bytes()
    i = 0
    while True:
        bytes_from_stream += stream.read(1024)
        # JPEG frames are delimited by the SOI (ffd8) and EOI (ffd9) markers.
        a = bytes_from_stream.find(b'\xff\xd8')
        b = bytes_from_stream.find(b'\xff\xd9')
        if a != -1 and b != -1:
            jpg = bytes_from_stream[a:b + 2]
            bytes_from_stream = bytes_from_stream[b + 2:]
            # FIX: np.frombuffer replaces the deprecated np.fromstring.
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
            cv2.imshow(host, frame)
            # Only attempt the (expensive) SIFT scan on every 30th frame.
            if i % 30 == 0:
                adjusted_image = get_scorecard_sift(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), template)
                if adjusted_image is not None:
                    cv2.imshow("image", adjusted_image)
                    cv2.waitKey(0)
                    cv2.destroyAllWindows()
                    all_flags = []
                    all_times = []
                    id_digits = get_id_from_scorecard(adjusted_image)
                    comp_ip, flags = construct_id(id_digits)
                    all_flags.append(flags)
                    print("Competitor id:", comp_ip)
                    for row in range(1, 6):
                        row_of_digits = get_row_of_digits_from_scorecard(adjusted_image, row)
                        constructed_time, flags = construct_time(row_of_digits)
                        all_flags.append(flags)
                        all_times.append(constructed_time)
                        print(constructed_time)
                    for flag in all_flags:
                        print(flag)
                    addInfoToDatabase(comp_ip, all_times, all_flags)
                    getWinners()

            # Press escape to close
            if cv2.waitKey(1) == 27:
                exit(0)
            i += 1

# Only start the stream when run as a script, not when imported as a module.
if __name__ == "__main__":
    start_video_stream("192.168.137.82:8080")

@@ -0,0 +1,279 @@
import skimage.transform
import skimage.filters
import numpy as np
import argparse
import cv2
import imutils
import tensorflow
import keras
from keras.datasets import mnist
from keras.optimizers import RMSprop
from keras import backend as K
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential, Model, load_model
from keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D


def order_points(pts):
    """Order four corner points as [top-left, top-right, bottom-right,
    bottom-left].

    The top-left corner has the smallest x+y sum and the bottom-right the
    largest; the top-right has the smallest (y - x) difference and the
    bottom-left the largest.
    """
    ordered = np.zeros((4, 2), dtype="float32")

    coordinate_sums = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(coordinate_sums)]   # top-left
    ordered[2] = pts[np.argmax(coordinate_sums)]   # bottom-right

    coordinate_diffs = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(coordinate_diffs)]  # top-right
    ordered[3] = pts[np.argmax(coordinate_diffs)]  # bottom-left

    return ordered


# In[13]:

def four_point_transform(image, pts):
    """Perspective-warp the quadrilateral `pts` in `image` to a top-down view.

    :param image: source image
    :param pts: 4x2 array of the quad's corners, in any order
    :return: warped image sized to the quad's maximum edge lengths
    """
    # obtain a consistent order of the points and unpack them individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the maximum distance
    # between bottom-right and bottom-left x-coordinates or the top-right and
    # top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the maximum distance
    # between the top-right and bottom-right y-coordinates or the top-left
    # and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct the set of
    # destination points to obtain a "birds eye view" (top-down view), again
    # specifying points in top-left, top-right, bottom-right, bottom-left order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped


def get_scorecard(image_path="D:\\Google Drive\\UVa\\Classes\\Semester 6\\CS 4501\\Project 4\\test_images\\close_center.jpg"):
    """Find the scorecard sheet in a photo and return top-down views of it.

    Generalized: the image path is now a parameter; the default preserves the
    previously hardcoded path, so existing callers are unaffected.

    :param image_path: path of the photo to scan
    :return: (color, grayscale) 650x650, 90-degree-rotated top-down views
    :raises RuntimeError: if no 4-sided contour (the sheet) is found
    """
    image = cv2.imread(image_path)
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)

    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (7, 7), 0)
    # gray = cv2.equalizeHist(gray)
    edged = cv2.Canny(gray, 30, 180)

    # show the original image and the edge detected image
    print("STEP 1: Edge Detection")

    # find the contours in the edged image, keeping only the largest ones
    # (3-tuple unpacking matches the OpenCV 3.x findContours signature)
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]

    # loop over the contours, looking for the first 4-point approximation
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)

        # a four-point contour is assumed to be the sheet of paper
        if len(approx) == 4:
            screenCnt = approx
            break

    # ROBUSTNESS FIX: previously a missing 4-sided contour fell through to an
    # unclear NameError on screenCnt below.
    if screenCnt is None:
        raise RuntimeError("Could not find a 4-sided contour (scorecard) in " + image_path)

    # show the contour (outline) of the piece of paper
    print("STEP 2: Find contours of paper")

    # apply the four point transform to obtain a top-down view of the original
    warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)

    # build the color view, then convert to grayscale for the second view
    out_bgr = skimage.transform.rotate(skimage.transform.resize(warped, (650, 650), mode='constant'), 90)
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

    print("STEP 3: Apply perspective transform")
    out_gray = skimage.transform.rotate(skimage.transform.resize(warped, (650, 650), mode='constant'), 90)
    return out_bgr, out_gray


def predict_digit(image):
    """Run the MNIST CNN on a digit image.

    :param image: 2D digit image (any size; resized to 28x28)
    :return: the 10-way softmax output vector (one score per digit class)
    """
    # input image dimensions
    img_rows, img_cols = 28, 28
    image = skimage.transform.resize(image, (img_cols, img_rows), mode='constant')
    if K.image_data_format() == 'channels_first':
        image = image.reshape(1, img_rows, img_cols)
    else:
        image = image.reshape(img_rows, img_cols, 1)

    # PERF FIX: cache the model on the function object — the original
    # re-read the .h5 file from disk on every call.
    if not hasattr(predict_digit, "_model"):
        predict_digit._model = load_model('CNN\\mnist_cnn_32.h5')

    return predict_digit._model.predict(np.asarray([image]))[0]


def test_contours(image):
    """Debug helper: edge-detect a float [0, 1] color image and display its
    contour approximations and minimum-area boxes (blocks on key presses).

    :param image: color image with float values in [0, 1]
    """
    image = (image * 255).astype("uint8")
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    cv2.imshow("blurred", blurred)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # gray = cv2.equalizeHist(gray)
    edged = cv2.Canny(blurred, 1, 80)
    cv2.imshow("Edges", edged)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # NOTE(review): 3-tuple unpacking matches the OpenCV 3.x findContours
    # signature; 2.x/4.x return only (contours, hierarchy).
    (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # cnts = cnts[:7] #sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
    polys = []
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        polys.append(approx)
        # draw the rotated minimum-area box (red) and the polygon (green)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        cv2.drawContours(image, [approx], -1, (0, 255, 0), 2)
    cv2.imshow("Outline", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # overlay all polygon approximations at once
    cv2.drawContours(image, polys, -1, (128, 255, 0), 2)
    cv2.imshow("Outline", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def test_contours_2(image):
    """Debug helper: show a locally-thresholded (binarized) scorecard view."""
    ink_mask = image < skimage.filters.threshold_local(image, 267)
    binarized = ink_mask.astype("float32")
    cv2.imshow("scorecard", binarized)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


def get_digits_from_scorecard(image):
    """Cut the seven 50x50 time-digit cells out of a rectified scorecard.

    :param image: grayscale scorecard image
    :return: list of seven binarized (float32) digit crops
    """
    # Adaptive local threshold: dark ink becomes True (then 1.0).
    binary = (image < skimage.filters.threshold_local(image, 267)).astype("float32")
    # Digit cells sit on a fixed row (y=298) spaced 54 px apart.
    return [binary[298:348, 58 + 54 * k:108 + 54 * k] for k in range(7)]


def construct_time(digits):
    """Format seven digit values as an "MM:SS.mmm" time string."""
    parts = [str(d) for d in digits[:7]]
    return "".join(parts[0:2]) + ":" + "".join(parts[2:4]) + "." + "".join(parts[4:7])


def keypoints(image):
    """Visualize SIFT matches between the scanned scorecard and the cross
    template (debug helper; shows a window and blocks until a key press).

    :param image: color scorecard image with float values in [0, 1]
    """
    template = cv2.imread(
        "D:\\Google Drive\\UVa\\Classes\\Semester 6\\CS 4501\\Project 4\\test_images\\template_cross.png")
    # Use the SIFT descriptor to find keypoint features in both images
    image = cv2.cvtColor((image * 255).astype("uint8"), cv2.COLOR_BGR2GRAY)
    # BUG FIX: cv2.imread returns uint8 data in [0, 255]; multiplying by 255
    # again overflowed/wrapped the template pixels. Convert it directly.
    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    keypoints_image, descriptor_image = sift.detectAndCompute(image, None)
    keypoints_template, descriptor_template = sift.detectAndCompute(template, None)

    # Compare the features in both images. For each feature in the first
    # image, find the two closest matching features in the other image.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(descriptor_image, descriptor_template, k=2)

    # Apply the ratio test to remove matches which are too similar to one
    # another, keeping only the unique matches.
    good = []
    for m, n in matches:
        if m.distance < 0.5 * n.distance:
            good.append([m])

    img3 = cv2.drawMatchesKnn(image, keypoints_image, template, keypoints_template, good, None, flags=2)
    cv2.imshow("matches", img3)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


# Script entry: scan the scorecard photo, then show a binarized preview.
scorecard_color, scorecard_gray = get_scorecard()
# keypoints(scorecard_color)

# Binarize the grayscale view with a local adaptive threshold and display it.
bw = scorecard_gray < skimage.filters.threshold_local(scorecard_gray, 267)
bw = bw.astype("float32")
cv2.imshow("scorecard", bw)
cv2.waitKey(0)
cv2.destroyAllWindows()


# for i in range(0, 7):
# image = scorecard_color[297:347, 56 + i * 54:56 + i * 54 + 50]
# test_contours(image)
# digits = get_digits_from_scorecard(scorecard_gray)
# predicted_digits = []
# for digit in digits:
# prediction = predict_digit(digit)
# print(prediction)
# which_digit = np.argmax(prediction)
# predicted_digits.append(which_digit)
# print(which_digit)
# print("Predicted Time:", construct_time(predicted_digits))
@@ -0,0 +1,74 @@
import numpy as np
import cv2
from matplotlib import pyplot as plt
import skimage.transform
import skimage.filters
from skimage import img_as_ubyte


# Match a skewed scorecard photo against the template with SIFT, warp it into
# the template frame, and save the match / warped / binarized visualizations.
MIN_MATCH_COUNT = 10

img1 = cv2.imread('test_images\\skew_shadow_new.jpg', 0)  # queryImage
img2 = cv2.imread('test_images\\template_new.png', 0)  # trainImage

img1 = cv2.resize(img1, (int(img1.shape[1]), int(img1.shape[0])))

# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params, search_params)

matches = flann.knnMatch(des1, des2, k=2)

# store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

print("# Matches: ", len(good))

if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    print(M)
    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

    h_2, w_2 = img2.shape
    img4 = cv2.warpPerspective(img1, M, (w_2, h_2))
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    # BUG FIX: the code below uses img4 and matchesMask, which only exist
    # when enough matches were found; previously this path crashed with a
    # NameError at plt.imshow(img4, ...). Exit cleanly instead.
    raise SystemExit(1)

draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,  # draw only inliers
                   flags=2)

img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, flags=2)
plt.axis('off')
plt.imshow(img3, 'gray'), plt.show()
cv2.imwrite('presentation_images\\sift.png', img3)
plt.axis('off')
plt.imshow(img4, 'gray'), plt.show()
cv2.imwrite('presentation_images\\transformed.png', img4)
plt.axis('off')
img4_bw = img4 < skimage.filters.threshold_local(img4, 133)
img4_bw = img_as_ubyte(img4_bw)
plt.imshow(img4_bw, 'gray'), plt.show()
cv2.imwrite('presentation_images\\bw_scorecard.png', img4_bw)
@@ -0,0 +1,70 @@
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

# Training hyper-parameters.
batch_size = 128
num_classes = 10
epochs = 12

# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Add the channel axis wherever the active backend expects it.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices (one-hot)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Two conv layers + max-pool + dropout, then a dense classifier head.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Persist the trained digit classifier for the scorecard reader.
model.save("new_model.h5")
@@ -0,0 +1,310 @@
import numpy as np
import cv2
from matplotlib import pyplot as plt
import skimage.transform
import skimage.filters
import numpy as np
import cv2
from keras import backend as K
from keras.models import load_model
from rubiks_database import getWinners, addInfoToDatabase
from skimage import img_as_ubyte


def get_scorecard_sift(image, template):
    """Locate the scorecard in `image` and warp it to the template's frame.

    Uses SIFT keypoints, FLANN matching with Lowe's ratio test, and a RANSAC
    homography.

    Parameters:
        image -- grayscale scene photograph containing the scorecard.
        template -- grayscale blank scorecard template.
    Returns the scene warped to the template's size, or None when fewer than
    MIN_MATCH_COUNT good matches are found.
    """
    MIN_MATCH_COUNT = 10

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(image, None)
    kp2, des2 = sift.detectAndCompute(template, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Two nearest neighbours per descriptor for Lowe's ratio test.
    matches = flann.knnMatch(des1, des2, k=2)

    # Keep only the good matches as per Lowe's ratio test (strict 0.5 ratio).
    good = [m for m, n in matches if m.distance < 0.5 * n.distance]

    print("# Matches: ", len(good))

    if len(good) <= MIN_MATCH_COUNT:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        return None

    # Matched point coordinates in the scene image and in the template.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # Homography scene -> template (5-pixel RANSAC reprojection threshold).
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    print(M)

    # Fix: the original drew the detected outline onto `template` in place
    # with cv2.polylines, mutating the caller's array, only to read its shape
    # afterwards.  The warp target size is simply the template's size.
    # (Also removed the dead matchesMask assignments — never used here.)
    h_2, w_2 = template.shape
    adjusted_image = cv2.warpPerspective(image, M, (w_2, h_2))
    return adjusted_image


def predict_digits(digit_images, digit_flags):
    """Classify each 28x28 digit image with the trained CNN.

    Reshapes the images in place to the backend's expected layout, then runs
    one batched prediction.  Returns (predicted_digits, flags) where a flag
    of 1 marks a low-confidence (< 0.75) prediction or an image that was
    already flagged during extraction.
    """
    img_rows, img_cols = 28, 28
    for idx in range(len(digit_images)):
        if K.image_data_format() == 'channels_first':
            digit_images[idx] = digit_images[idx].reshape(1, img_rows, img_cols)
        else:
            digit_images[idx] = digit_images[idx].reshape(img_rows, img_cols, 1)

    model = load_model('CNN\\new_model.h5')
    print("# Digits:", len(digit_images))
    predictions = model.predict(np.asarray(digit_images))

    predicted_digits = []
    flags = []
    for idx, prediction in enumerate(predictions):
        which_digit = np.argmax(prediction)
        confidence = np.max(prediction)
        print("Predicted", which_digit, "with confidence", confidence)
        predicted_digits.append(which_digit)
        if confidence < 0.75 or digit_flags[idx] == 1:
            flags.append(1)
        else:
            flags.append(0)

    return predicted_digits, flags


def get_id_from_scorecard(image):
    """Crop the three competitor-ID digit cells from a warped scorecard.

    Binarises the image with an adaptive local threshold (dark ink -> True)
    and returns the three ID cells as float32 arrays, left to right.
    """
    bw = (image < skimage.filters.threshold_local(image, 101)).astype("float32")
    # The ID row sits at y = 134..177; columns are 54 px apart starting at x = 43.
    return [bw[134:177, 43 + 54 * col:90 + 54 * col] for col in range(3)]


def extract_digit(digit):
    """Locate the digit in one binarised cell and centre it on a 28x28 canvas.

    Parameters:
        digit -- 2-D boolean/float array, one scorecard cell.
    Returns (image, flag): a 28x28 float array and flag 0 on success, or an
    all-zero 28x28 array and flag 1 when no plausible digit contour exists.
    """
    # NOTE(review): indexing the result with [1] assumes the OpenCV 3.x
    # findContours return value (image, contours, hierarchy); OpenCV 4.x
    # returns (contours, hierarchy) instead — confirm which version is pinned.
    cnts = cv2.findContours(img_as_ubyte(digit.copy()), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[1]
    digitCnts = []
    # print("Found", cnts, "number of contours")
    # Only the two largest contours are considered digit candidates.
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:2]
    # loop over the digit area candidates
    added = False
    for c in cnts:
        # compute the bounding box of the contour
        (x, y, w, h) = cv2.boundingRect(c)
        # if the contour is sufficiently large, it must be a digit
        if not ((40 >= h >= 12) and (40 >= w >= 3)):
            continue
        else:
            added = True
            digitCnts.append(c)
            # cv2.imshow("Show", digit_draw)
            # cv2.waitKey()
            # cv2.destroyAllWindows()
            digit_crop = digit[y:y + h, x:x + w]
            # plt.imshow(digit), plt.show()
            # plt.imshow(digit_crop), plt.show()
            # MNIST-style normalisation: scale the longest side to 20 px...
            resize_ratio = min(20. / w, 20. / h)
            resize_width = int(resize_ratio * w)
            resize_height = int(resize_ratio * h)
            digit_resized = skimage.transform.resize(digit_crop, (resize_height, resize_width), mode='constant')
            # plt.imshow(digit), plt.show()
            # plt.imshow(digit_resized), plt.show()
            digit_28_28 = np.zeros((28, 28), dtype=float)
            # ...then paste it centred on a blank 28x28 canvas.
            lower_bound_y = int((28 - resize_height) / 2)
            upper_bound_y = int(resize_height + (28 - resize_height) / 2)
            lower_bound_x = int((28 - resize_width) / 2)
            upper_bound_x = int(resize_width + (28 - resize_width) / 2)
            digit_28_28[lower_bound_y:upper_bound_y, lower_bound_x:upper_bound_x] = digit_resized
            # plt.imshow(digit, 'gray'), plt.show()
            # plt.imshow(digit_28_28, 'gray'), plt.show()
            # cv2.imwrite('presentation_images\\digits\\bw_' + str(row_num) + '_' + str(column + 1) + '.png',
            # img_as_ubyte(digit_crop))
            # cv2.imwrite('presentation_images\\digits\\bw_28_' + str(row_num) + '_' + str(column + 1) + '.png',
            # img_as_ubyte(digit_28_28))
            # Return on the first contour that passes the size filter.
            return digit_28_28, 0
    if not added:
        # If a digit was not found, add on a blank image and a flag to show it isn't a digit
        return np.zeros((28, 28)), 1


def get_digits_from_scorecard(image):
    """Extract all 38 digit cells (3 ID digits + 5 rows x 7 time digits).

    Parameters:
        image -- grayscale scorecard already warped to the template frame.
    Returns (digits, flags): 28x28 digit images in reading order and, per
    digit, a flag of 1 when the cell appeared blank.
    Side effect: writes the binarised scorecard to presentation_images.
    """
    # Binarise with an adaptive local threshold (dark ink -> True).
    bw = image < skimage.filters.threshold_local(image, 101)
    cv2.imwrite('presentation_images\\bw_digits.png', img_as_ubyte(bw))

    digits = []
    flags = []

    # Competitor-ID row: three cells at y = 134..177, 54 px column pitch.
    for column in range(0, 3):
        min_y = 134
        max_y = 177
        min_x = 43 + 54 * column
        max_x = 90 + 54 * column
        digit, flag = extract_digit(bw[min_y:max_y, min_x:max_x])
        digits.append(digit)
        flags.append(flag)

    # Five time rows of seven digits each; rows are 49 px apart.
    # Fix: removed leftover cv2.imshow/cv2.waitKey debug calls that blocked
    # execution (waiting for a keypress) on every one of the 35 time cells.
    for row in range(0, 5):
        for column in range(0, 7):
            min_y = 233 + 49 * row
            max_y = 273 + 49 * row
            min_x = 43 + 54 * column
            max_x = 90 + 54 * column
            digit, flag = extract_digit(bw[min_y:max_y, min_x:max_x])
            digits.append(digit)
            flags.append(flag)

    return digits, flags


# def get_row_of_digits_from_scorecard(image, row_num):
# adjusted_row_num = row_num - 1
# # print("shape", image.shape)
# bw = image < skimage.filters.threshold_local(image, 101)
# # plt.imshow(bw), plt.show()
# cv2.imwrite('presentation_images\\bw_digits.png', img_as_ubyte(bw))
#
# # cv2.imshow("Black and White", bw)
# # cv2.waitKey(0)
# # cv2.destroyAllWindows()
# digits = []
# for column in range(0, 7):
# min_y = 233 + 49 * adjusted_row_num
# max_y = 273 + 49 * adjusted_row_num
# min_x = 43 + 54 * column
# max_x = 90 + 54 * column
# digit_not_bw = image[min_y:max_y, min_x:max_x]
# digit = bw[min_y:max_y, min_x:max_x]
# # plt.imshow(digit_not_bw), plt.show()
# # plt.imshow(digit), plt.show()
#
# cnts = cv2.findContours(img_as_ubyte(digit.copy()), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cnts = cnts[1]
# digitCnts = []
# digit_draw = digit_not_bw.copy()
# # print("Found", cnts, "number of contours")
# cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:2]
# # loop over the digit area candidates
# added = False
# for c in cnts:
# # compute the bounding box of the contour
# (x, y, w, h) = cv2.boundingRect(c)
# # if the contour is sufficiently large, it must be a digit
# if not ((40 >= h >= 12) and (40 >= w >= 3)):
# continue
# else:
# added = True
# digitCnts.append(c)
# cv2.rectangle(digit_draw, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
# # cv2.imshow("Show", digit_draw)
# # cv2.waitKey()
# # cv2.destroyAllWindows()
# digit_crop = digit[y:y+h, x:x+w]
# # plt.imshow(digit), plt.show()
# # plt.imshow(digit_crop), plt.show()
# resize_ratio = min(20./w, 20./h)
# resize_width = int(resize_ratio * w)
# resize_height = int(resize_ratio * h)
# digit_resized = skimage.transform.resize(digit_crop, (resize_height, resize_width), mode='constant')
# # plt.imshow(digit), plt.show()
# # plt.imshow(digit_resized), plt.show()
# digit_28_28 = np.zeros((28, 28), dtype=float)
# lower_bound_y = int((28 - resize_height)/2)
# upper_bound_y = int(resize_height + (28 - resize_height)/2)
# lower_bound_x = int((28 - resize_width)/2)
# upper_bound_x = int(resize_width + (28 - resize_width)/2)
# digit_28_28[lower_bound_y:upper_bound_y, lower_bound_x:upper_bound_x] = digit_resized
# # plt.imshow(digit, 'gray'), plt.show()
# # plt.imshow(digit_28_28, 'gray'), plt.show()
# cv2.imwrite('presentation_images\\digits\\' + str(row_num) + '_' + str(column + 1) + '.png',
# img_as_ubyte(digit_not_bw))
# cv2.imwrite('presentation_images\\digits\\box_' + str(row_num) + '_' + str(column + 1) + '.png',
# img_as_ubyte(digit_draw))
# cv2.imwrite('presentation_images\\digits\\bw_' + str(row_num) + '_' + str(column + 1) + '.png',
# img_as_ubyte(digit_crop))
# cv2.imwrite('presentation_images\\digits\\bw_28_' + str(row_num) + '_' + str(column + 1) + '.png',
# img_as_ubyte(digit_28_28))
# digits.append(img_as_ubyte(digit_28_28))
# if not added:
# digits.append(np.zeros((28, 28)))
#
#
# # cv2.imshow("digit", digit)
# # cv2.waitKey(0)
# # cv2.destroyAllWindows()
# return digits, flags


def construct_id(digits):
    """Join the first three predicted digits into the competitor-ID string."""
    return "{}{}{}".format(digits[0], digits[1], digits[2])


def construct_times(digits):
    """Format five rounds of seven predicted digits as "MM:SS:mmm" strings.

    Digits are consumed seven at a time, in order: two minute digits, two
    second digits, three millisecond digits.
    """
    times = []
    for start in range(0, 35, 7):
        d = [str(digits[start + offset]) for offset in range(7)]
        times.append(d[0] + d[1] + ":" + d[2] + d[3] + ":" + d[4] + d[5] + d[6])
    return times


# --- Script entry: scan one scorecard photo and upload the results ---
# NOTE(review): all_times and all_flags appear unused in this script.
all_times = []
all_flags = []
image = cv2.imread('test_images\\final_template_test.jpg', 0)   # scene photo, grayscale
template = cv2.imread('test_images\\template_new.png', 0)       # blank scorecard template
adjusted_image = get_scorecard_sift(image, template)
if adjusted_image is None:
    print("Could not extract image")
else:
    # Crop all digit cells, classify them, then rebuild the id and the times.
    all_digits, digit_flags = get_digits_from_scorecard(adjusted_image)
    predictions, prediction_flags = predict_digits(all_digits, digit_flags)
    comp_id = construct_id(predictions[0:3])
    times = construct_times(predictions[3:])
    # for time in times:
    #     print(time)
    # for flag in prediction_flags:
    #     print(flag)

    # Push the five round times plus flags to Firebase and print the winners.
    addInfoToDatabase(comp_id, times, prediction_flags)
    getWinners()

@@ -0,0 +1,106 @@
import numpy as np
import cv2
from matplotlib import pyplot as plt

# --- ORB feature-matching experiment: scene photo vs scorecard template ---
image = cv2.imread('test_images\\final_template_test.jpg', 0)
template = cv2.imread('test_images\\template_new.png', 0)

# Initiate STAR detector
orb = cv2.ORB_create()

# find the keypoints with ORB
kp_image = orb.detect(image, None)
kp_template = orb.detect(template, None)

# compute the descriptors with ORB
kp_image, des_image = orb.compute(image, kp_image)
kp_template, des_template = orb.compute(template, kp_template)

# draw only keypoints location,not size and orientation
image_keypoints = cv2.drawKeypoints(image, kp_image, None, color=(0, 255, 0), flags=0)
plt.imshow(image_keypoints), plt.show()

# create BFMatcher object (Hamming distance suits ORB's binary descriptors)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Match descriptors.
matches = bf.match(des_image, des_template)

# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)

# Draw first 10 matches.
img_matches = cv2.drawMatches(image, kp_image, template, kp_template, matches[:10], None, flags=2)

plt.imshow(img_matches), plt.show()

############ Flann Matcher

# MIN_MATCH_COUNT = 10
#
# FLANN_INDEX_KDTREE = 0
# FLANN_INDEX_LSH = 6
# index_params = dict(algorithm=FLANN_INDEX_LSH,
# table_number=6, # 12
# key_size=12, # 20
# multi_probe_level=1) # 2
# # index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
# search_params = dict(checks=50)
#
# flann = cv2.FlannBasedMatcher(index_params, search_params)
#
# matches = flann.knnMatch(des_image, des_template, k=2)

######## Brute Force Matcher

# create BFMatcher object
# NOTE(review): this repeats the BF matching above verbatim.
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Match descriptors.
matches = bf.match(des_image, des_template)

# # Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)

# store all the good matches as per Lowe's ratio test.
# good = []
# for m_n in matches:
# if len(m_n) != 2:
# continue
# (m, n) = m_n
# if m.distance < 0.6 * n.distance:
# good.append(m)
# With cross-checked single matches there is no second neighbour, so the
# ratio test is skipped and every match is kept.
good = matches

print("# Matches: ", len(good))

if len(good) > 0:
    # Get a list of matching points in the scene image
    src_pts = np.float32([kp_image[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    # Get a list of matching points in the template image
    dst_pts = np.float32([kp_template[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # Find a homography which maps scene points to the template points
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 10)
    matchesMask = mask.ravel().tolist()
    print(M)
    # Get the corners of the scene image
    h, w = image.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    print(pts)
    # Use the homography to warp the corners of the scene image to the template image
    dst = cv2.perspectiveTransform(pts, M)
    print(dst)
    # Draw the warped scene corners to the template image in the scene image
    img2 = cv2.polylines(template, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    cv2.imshow("image", img2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Warp the scene image to the size of the template image
    h_2, w_2 = img2.shape
    adjusted_image = cv2.warpPerspective(image, M, (w_2, h_2))
    cv2.imshow("image", adjusted_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # cv2.imshow("Warped", adjusted_image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
@@ -0,0 +1,138 @@
# further information:
# * http://stackoverflow.com/questions/11114349/how-to-visualize-descriptor-matching-using-opencv-module-in-python
# * http://docs.opencv.org/doc/tutorials/features2d/feature_homography/feature_homography.html#feature-homography
# * http://stackoverflow.com/questions/9539473/opencv-orb-not-finding-matches-once-rotation-scale-invariances-are-introduced
# * OpenCV 2 Computer Vision Application Programming Cookbook, Chapter 9
import cv2
import scipy as sp
import numpy as np


ratio = 0.65


""" Clear matches for which NN ratio is > than threshold """
def filter_distance(matches):
dist = [m.distance for m in matches]
thres_dist = (sum(dist) / len(dist)) * ratio

# keep only the reasonable matches
sel_matches = [m for m in matches if m.distance < thres_dist]
print('#selected matches:%d (out of %d)' % (len(sel_matches), len(matches)))
return sel_matches


""" keep only symmetric matches """
def filter_asymmetric(matches, matches2):
sel_matches = []
for match1 in matches:
for match2 in matches2:
if k_ftr[match1.queryIdx] == k_ftr[match2.trainIdx] and k_scene[match1.trainIdx] == k_scene[match2.queryIdx]:
sel_matches.append(match1)
break
return sel_matches

# Todo: filter_ransac

def filter_matches(matches, matches2):
    """Distance-filter both match directions, then keep only symmetric matches."""
    forward = filter_distance(matches)
    backward = filter_distance(matches2)
    return filter_asymmetric(forward, backward)




# --- Script entry: ORB matching with distance + symmetry filtering ---
img1_path = "test_images\\final_template_test.jpg"
img2_path = "test_images\\template_new.png"

img_scene = cv2.imread(img1_path, 0)
img_ftr = cv2.imread(img2_path, 0)

# ORB both detects and describes; BFMatcher with Hamming suits binary descriptors.
detector = cv2.ORB_create()
descriptor = detector
matcher = cv2.BFMatcher(cv2.NORM_HAMMING)

# detector = cv2.FeatureDetector_create("ORB") #SURF
# descriptor = cv2.DescriptorExtractor_create("ORB") #BRIEF
# matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming") #FlannBased #BruteForce-Hamming

# detect keypoints
kp_scene = detector.detect(img_scene)
kp_ftr = detector.detect(img_ftr)

print('#keypoints in image1: %d, image2: %d' % (len(kp_scene), len(kp_ftr)))

# descriptors
k_scene, d_scene = descriptor.compute(img_scene, kp_scene)
k_ftr, d_ftr = descriptor.compute(img_ftr, kp_ftr)

print('#keypoints in image1: %d, image2: %d' % (len(d_scene), len(d_ftr)))

# match the keypoints in both directions (needed for the symmetry filter)
matches = matcher.match(d_scene, d_ftr)
matches2 = matcher.match(d_ftr, d_scene)

# visualize the matches
print('#matches:', len(matches))
dist = [m.distance for m in matches]

print('distance: min: %.3f' % min(dist))
print('distance: mean: %.3f' % (sum(dist) / len(dist)))
print('distance: max: %.3f' % max(dist))


""" filter matches """

sel_matches = filter_matches(matches, matches2)


""" localize object """

h_scene, w_scene = img_scene.shape[:2]
h_ftr, w_ftr = img_ftr.shape[:2]

# Collect the matched coordinates in each image.
ftr = []
scene = []

for m in sel_matches:
    scene.append(k_scene[m.queryIdx].pt)
    ftr.append(k_ftr[m.trainIdx].pt)

ftr = np.float32(ftr)
scene = np.float32(scene)

# Homography scene -> template; 0.3-pixel RANSAC reprojection threshold (strict).
homography, mask = cv2.findHomography(scene, ftr, cv2.RANSAC, 0.3)
ftr_corners = np.float32([[0, 0], [w_ftr, 0], [w_ftr, h_ftr], [0, h_ftr]]).reshape(1, -1, 2)
print(ftr_corners)
corners = np.int32(cv2.perspectiveTransform(ftr_corners, homography).reshape(-1, 2))
print(corners)

# Warp the scene photo to the template's size and show it.
h_2, w_2 = img_ftr.shape
adjusted_image = cv2.warpPerspective(img_scene, homography, (w_2, h_2))
cv2.imshow("image", adjusted_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

""" visualization """

# Side-by-side canvas: scene on the left, template on the right, replicated
# into all three channels so the coloured match lines stand out.
# NOTE(review): sp.zeros / sp.random rely on scipy re-exporting numpy —
# removed in modern scipy; consider np.zeros / np.random instead.
view = sp.zeros((max(h_scene, h_ftr), w_scene + w_ftr, 3), np.uint8)
view[:h_scene, :w_scene, 0] = img_scene
view[:h_ftr, w_scene:, 0] = img_ftr
view[:, :, 1] = view[:, :, 0]
view[:, :, 2] = view[:, :, 0]

for m in sel_matches:
    # draw the keypoints
    color = tuple([sp.random.randint(0, 255) for _ in range(3)])
    cv2.line(view, (int(k_scene[m.queryIdx].pt[0]), int(k_scene[m.queryIdx].pt[1])),
             (int(k_ftr[m.trainIdx].pt[0] + w_scene), int(k_ftr[m.trainIdx].pt[1])), color, 2)


# Template corners (green) and their warped counterparts (cyan).
cv2.polylines(view, [np.int32([c + [w_scene, 0] for c in ftr_corners])], True, (0, 255, 0), 2)
cv2.polylines(view, [corners], True, (255, 255, 0), 2)

# cv2.imshow("view", view)
cv2.imwrite("output.jpg", view)
# cv2.waitKey()

@@ -0,0 +1,69 @@
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
# NOTE(review): the accuracy claim above comes from the original Keras
# example; this file trains a reduced model (see the commented-out layers).

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

# Training hyper-parameters.
batch_size = 128
num_classes = 10
epochs = 12

# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Add the channel axis wherever the active backend expects it.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices (one-hot)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Deliberately tiny model: one conv layer + max-pool + softmax head; the
# larger layers are kept commented out for comparison experiments.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
#model.add(Conv2D(4, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(Dropout(0.25))
model.add(Flatten())
#model.add(Dense(128, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# Persist the trained model.
model.save('samantha.h5')
@@ -0,0 +1,255 @@
import numpy as np
import cv2
from matplotlib import pyplot as plt
import skimage.transform
import skimage.filters
import numpy as np
import cv2
from keras import backend as K
from keras.models import load_model
from rubiks_database import getWinners, addInfoToDatabase
from skimage import img_as_ubyte


def get_scorecard_sift(image, template):
    """Locate the scorecard in `image` and warp it to the template's frame.

    Uses SIFT keypoints, FLANN matching with Lowe's ratio test, and a RANSAC
    homography.

    Parameters:
        image -- grayscale scene photograph containing the scorecard.
        template -- grayscale blank scorecard template.
    Returns the scene warped to the template's size, or None when fewer than
    MIN_MATCH_COUNT good matches are found.
    """
    MIN_MATCH_COUNT = 10

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(image, None)
    kp2, des2 = sift.detectAndCompute(template, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Two nearest neighbours per descriptor for Lowe's ratio test.
    matches = flann.knnMatch(des1, des2, k=2)

    # Keep only the good matches as per Lowe's ratio test (strict 0.5 ratio).
    good = [m for m, n in matches if m.distance < 0.5 * n.distance]

    print("# Matches: ", len(good))

    if len(good) <= MIN_MATCH_COUNT:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        return None

    # Matched point coordinates in the scene image and in the template.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    # Homography scene -> template (5-pixel RANSAC reprojection threshold).
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    print(M)

    # Fix: the original drew the detected outline onto `template` in place
    # with cv2.polylines, mutating the caller's array, only to read its shape
    # afterwards.  The warp target size is simply the template's size.
    # (Also removed the dead matchesMask assignments — never used here.)
    h_2, w_2 = template.shape
    adjusted_image = cv2.warpPerspective(image, M, (w_2, h_2))
    return adjusted_image


def predict_digits(digit_images):
    """Run the CNN on a batch of digit images and return the first prediction.

    Fix: the row-count variable was declared as `img_row` but read as
    `img_rows`, raising NameError on every call.
    NOTE(review): despite the plural name this returns (which_digit,
    confidence) only for the FIRST image ([0]); predict_digit below is the
    per-image variant — confirm whether per-image results were intended.
    """
    img_rows = 28
    img_cols = 28
    # Reshape every image in place to the backend's expected layout.
    for i in range(len(digit_images)):
        if K.image_data_format() == 'channels_first':
            digit_images[i] = digit_images[i].reshape(1, img_rows, img_cols)
        else:
            digit_images[i] = digit_images[i].reshape(img_rows, img_cols, 1)
    model = load_model('CNN\\new_model.h5')
    prediction = model.predict(np.asarray(digit_images))[0]
    which_digit = np.argmax(prediction)
    confidence = np.max(prediction)
    return which_digit, confidence


def predict_digit(image):
    """Resize one cell image to 28x28 and classify it with the trained CNN.

    Returns (which_digit, confidence) for the single most likely class.
    """
    # input image dimensions expected by the model
    img_rows, img_cols = 28, 28
    image = skimage.transform.resize(image, (img_cols, img_rows), mode='constant')
    # Place the channel axis where the active backend expects it.
    if K.image_data_format() == 'channels_first':
        target_shape = (1, img_rows, img_cols)
    else:
        target_shape = (img_rows, img_cols, 1)
    image = image.reshape(target_shape)

    model = load_model('CNN\\new_model.h5')

    prediction = model.predict(np.asarray([image]))[0]
    which_digit = np.argmax(prediction)
    confidence = np.max(prediction)
    return which_digit, confidence


def get_id_from_scorecard(image):
    """Return the three binarised competitor-ID cells of a warped scorecard.

    Binarises with an adaptive local threshold (dark ink -> True); the ID row
    occupies y = 134..177 and the cells sit 54 px apart starting at x = 43.
    """
    threshold = skimage.filters.threshold_local(image, 101)
    bw = (image < threshold).astype("float32")
    digits = []
    for column in range(3):
        left = 43 + 54 * column
        digits.append(bw[134:177, left:left + 47])
    return digits


def get_row_of_digits_from_scorecard(image, row_num):
    """Extract the seven digit cells of time row `row_num` (1-based).

    Parameters:
        image -- grayscale scorecard already warped to the template frame.
        row_num -- 1-based time-row index (1..5).
    Returns (digits, flags): 28x28 uint8 digit images plus, per entry, a
    flag of 1 when the cell had no plausible digit contour (a blank image is
    emitted in its place).
    Fix: `flags` was returned but never defined, raising NameError.
    Side effect: writes presentation/debug images for each cell.
    """
    adjusted_row_num = row_num - 1
    # Binarise with an adaptive local threshold (dark ink -> True).
    bw = image < skimage.filters.threshold_local(image, 101)
    cv2.imwrite('presentation_images\\bw_digits.png', img_as_ubyte(bw))

    digits = []
    flags = []
    for column in range(0, 7):
        # Cell geometry: rows 49 px apart starting at y = 233, columns 54 px
        # apart starting at x = 43.
        min_y = 233 + 49 * adjusted_row_num
        max_y = 273 + 49 * adjusted_row_num
        min_x = 43 + 54 * column
        max_x = 90 + 54 * column
        digit_not_bw = image[min_y:max_y, min_x:max_x]
        digit = bw[min_y:max_y, min_x:max_x]

        # NOTE(review): cnts[1] assumes the OpenCV 3.x findContours return
        # value (image, contours, hierarchy); OpenCV 4.x returns 2 values.
        cnts = cv2.findContours(img_as_ubyte(digit.copy()), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[1]
        digitCnts = []
        digit_draw = digit_not_bw.copy()
        # Only the two largest contours are considered digit candidates.
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:2]
        added = False
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            # Size filter: plausible digit bounding boxes only.
            if not ((40 >= h >= 12) and (40 >= w >= 3)):
                continue
            added = True
            digitCnts.append(c)
            cv2.rectangle(digit_draw, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
            digit_crop = digit[y:y + h, x:x + w]
            # MNIST-style normalisation: scale the longest side to 20 px,
            # then paste centred on a blank 28x28 canvas.
            resize_ratio = min(20. / w, 20. / h)
            resize_width = int(resize_ratio * w)
            resize_height = int(resize_ratio * h)
            digit_resized = skimage.transform.resize(digit_crop, (resize_height, resize_width), mode='constant')
            digit_28_28 = np.zeros((28, 28), dtype=float)
            lower_bound_y = int((28 - resize_height) / 2)
            upper_bound_y = int(resize_height + (28 - resize_height) / 2)
            lower_bound_x = int((28 - resize_width) / 2)
            upper_bound_x = int(resize_width + (28 - resize_width) / 2)
            digit_28_28[lower_bound_y:upper_bound_y, lower_bound_x:upper_bound_x] = digit_resized
            # Debug/presentation output for each extraction stage.
            cv2.imwrite('presentation_images\\digits\\' + str(row_num) + '_' + str(column + 1) + '.png',
                        img_as_ubyte(digit_not_bw))
            cv2.imwrite('presentation_images\\digits\\box_' + str(row_num) + '_' + str(column + 1) + '.png',
                        img_as_ubyte(digit_draw))
            cv2.imwrite('presentation_images\\digits\\bw_' + str(row_num) + '_' + str(column + 1) + '.png',
                        img_as_ubyte(digit_crop))
            cv2.imwrite('presentation_images\\digits\\bw_28_' + str(row_num) + '_' + str(column + 1) + '.png',
                        img_as_ubyte(digit_28_28))
            digits.append(img_as_ubyte(digit_28_28))
            flags.append(0)
        if not added:
            # Blank cell: emit an empty image and flag it as non-digit.
            digits.append(np.zeros((28, 28)))
            flags.append(1)

    return digits, flags


def construct_id(digit_images):
    """Predict each ID cell and join the first three digits into an ID string.

    Returns (id_string, flags) where flags lists the (stringified) positions
    whose prediction confidence fell below 0.5.
    """
    digits = []
    digit_flags = []
    for position, digit_image in enumerate(digit_images):
        predicted_digit, confidence = predict_digit(digit_image)
        print("Predicted:", predicted_digit, "with confidence", 100 * confidence)
        digits.append(predicted_digit)
        if confidence < 0.5:
            digit_flags.append(str(position))
    comp_ip = str(digits[0]) + str(digits[1]) + str(digits[2])
    return comp_ip, digit_flags


def construct_time(digit_images):
    """Predict seven digit cells and format them as a "DD:DD:DDD" time string.

    A None entry is treated as digit 0 with zero confidence.  Returns
    (time_string, flags) where flags lists the (stringified) positions whose
    prediction confidence fell below 0.5.
    """
    digits = []
    digit_flags = []
    for position, digit_image in enumerate(digit_images):
        if digit_image is None:
            predicted_digit = 0
            confidence = 0.
        else:
            predicted_digit, confidence = predict_digit(digit_image)
        print("Predicted:", predicted_digit, "with confidence", 100 * confidence)
        digits.append(predicted_digit)
        if confidence < 0.5:
            digit_flags.append(str(position))
    parts = [str(d) for d in digits[:7]]
    time = parts[0] + parts[1] + ":" + parts[2] + parts[3] + ":" + parts[4] + parts[5] + parts[6]
    return time, digit_flags

# --- Script entry: scan one scorecard photo row-by-row and upload results ---
all_times = []
image = cv2.imread('test_images\\samanthas_shadow.jpg', 0)   # scene photo, grayscale
template = cv2.imread('test_images\\template_new.png', 0)    # blank scorecard template
adjusted_image = get_scorecard_sift(image, template)
if adjusted_image is None:
    print("Could not extract image")
else:
    all_flags = []
    # Read the 3-digit competitor id from the warped scorecard.
    id_digits = get_id_from_scorecard(adjusted_image)
    comp_ip, flags = construct_id(id_digits)
    all_flags.append(flags)
    print("Competitor id:", comp_ip)
    digit_images = []
    # Collect the digit cells of all five time rows.
    for row in range(1, 6):
        row_of_digits, flags = get_row_of_digits_from_scorecard(adjusted_image, row)
        for digit in row_of_digits:
            digit_images.append(digit)

    # NOTE(review): construct_time only reads the first seven digits, so this
    # builds round 1's time even though all rows were collected; it looks
    # like the call was meant to run once per row — confirm intended behavior
    # (addInfoToDatabase expects five times).
    constructed_time, flags = construct_time(digit_images)
    all_flags.append(flags)
    all_times.append(constructed_time)
    print(constructed_time)
    for flag in all_flags:
        print(flag)
    addInfoToDatabase(comp_ip, all_times, all_flags)
    getWinners()

@@ -0,0 +1,84 @@
import numpy as np
import cv2

def get_scorecard_sift(image, template):
    """Find the scorecard in `image` via SIFT+FLANN and warp it to template size.

    Interactive variant: shows the detected outline and the warped result in
    OpenCV windows (blocks until a key is pressed).  Returns the warped image,
    or None when fewer than MIN_MATCH_COUNT good matches are found.
    """
    MIN_MATCH_COUNT = 10

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(image, None)
    kp2, des2 = sift.detectAndCompute(template, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # Two nearest neighbours per descriptor for Lowe's ratio test.
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test (strict 0.5 ratio).
    good = []
    for m, n in matches:
        if m.distance < 0.5 * n.distance:
            good.append(m)

    print("# Matches: ", len(good))

    if len(good) > MIN_MATCH_COUNT:
        # Matched point coordinates in the scene image and in the template.
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # Homography scene -> template (5-pixel RANSAC reprojection threshold).
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()  # NOTE(review): unused here
        print(M)
        # Outline the warped scene corners on the template for visual
        # confirmation (this draws onto `template` in place).
        h, w = image.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(template, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        cv2.imshow("image", img2)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # Warp the scene to the template's size and show it.
        h_2, w_2 = img2.shape
        adjusted_image = cv2.warpPerspective(image, M, (w_2, h_2))
        cv2.imshow("Warped", adjusted_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        return adjusted_image
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None  # NOTE(review): dead assignment
        return None


# --- Script entry: poll the default webcam until a scorecard is detected ---
cap = cv2.VideoCapture(0)
template = cv2.imread('test_images\\template_new.png', 0)
while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detected_scorecard = get_scorecard_sift(gray, template)
    if detected_scorecard is None:
        print("No scorecard found.")
    else:
        print("Got a scorecard.")
        break
    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break  # When everything done, release the capture

cap.release()
cv2.destroyAllWindows()

# NOTE(review): if the loop exited via 'q', detected_scorecard is None and
# the imshow below will fail — guard before displaying.
cv2.imshow("scorecard", detected_scorecard)
cv2.waitKey(0)
cv2.destroyAllWindows()

@@ -0,0 +1,90 @@
import cv2
import urllib.request
import numpy as np
import sys

def get_scorecard_sift(image, template):
    """Locate the scorecard ``template`` within grayscale ``image`` using SIFT.

    Matches SIFT keypoints between the two images with a FLANN-based
    matcher, filters them with Lowe's ratio test, estimates a homography
    from the surviving matches, and warps ``image`` into the template's
    frame.

    Returns the warped (rectified) image, or None when too few good
    matches are found or no homography can be estimated.
    """
    MIN_MATCH_COUNT = 10

    # Initiate SIFT detector.
    sift = cv2.xfeatures2d.SIFT_create()

    # Find the keypoints and descriptors with SIFT.
    kp1, des1 = sift.detectAndCompute(image, None)
    kp2, des2 = sift.detectAndCompute(template, None)

    # knnMatch with k=2 needs at least 2 descriptors on each side;
    # bail out early instead of letting FLANN raise.
    if des1 is None or des2 is None or len(des1) < 2 or len(des2) < 2:
        print("# Matches: ", 0)
        print("Not enough matches are found - %d/%d" % (0, MIN_MATCH_COUNT))
        return None

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des1, des2, k=2)

    # Store all the good matches as per Lowe's ratio test.
    good = [m for m, n in matches if m.distance < 0.5 * n.distance]

    print("# Matches: ", len(good))

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        # RANSAC with a 5 px reprojection threshold; M maps image -> template.
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if M is None:
            # Estimation can fail even with enough matches (degenerate points).
            print("Homography estimation failed.")
            return None
        print(M)

        # Project the image corners into template space (debug outline).
        h, w = image.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        # Draw on a COPY: cv2.polylines draws in place, and mutating the
        # caller's template would accumulate outlines across repeated calls
        # (this function is called once per streamed frame batch).
        img2 = cv2.polylines(template.copy(), [np.int32(dst)], True, 255, 3, cv2.LINE_AA)

        # Use only height/width so both grayscale (2-D) and color (3-D)
        # templates work; the original 3-way unpack crashed on grayscale.
        h_2, w_2 = img2.shape[:2]
        adjusted_image = cv2.warpPerspective(image, M, (w_2, h_2))
        # cv2.imshow("Warped", adjusted_image)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        return adjusted_image
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        return None



# Read an MJPEG stream over HTTP (e.g. from an IP-webcam phone app),
# display it, and attempt scorecard detection on every 30th frame.
host = "192.168.137.82:8080"
if len(sys.argv) > 1:
    host = sys.argv[1]

hoststr = 'http://' + host + '/video?x.mjpeg'
print('Streaming ' + hoststr)

stream = urllib.request.urlopen(hoststr)

template = cv2.imread('test_images\\template_new.png')
buf = b""  # rolling byte buffer; renamed so the builtin `bytes` isn't shadowed
i = 0
while True:
    buf += stream.read(1024)
    # JPEG frames are delimited by SOI (0xffd8) and EOI (0xffd9) markers.
    a = buf.find(b'\xff\xd8')
    b = buf.find(b'\xff\xd9')
    if a != -1 and b != -1:
        jpg = buf[a:b + 2]
        buf = buf[b + 2:]
        # np.frombuffer replaces the deprecated np.fromstring for binary data.
        frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow(host, frame)
        # Only run the (expensive) SIFT detection once every 30 frames.
        if i % 30 == 0:
            adjusted_image = get_scorecard_sift(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), template)
            if adjusted_image is not None:
                cv2.imshow("image", adjusted_image)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
        # Press escape to close.
        if cv2.waitKey(1) == 27:
            exit(0)
        i += 1