-import cv2
+# importing libraries
 import numpy as np
-from skimage.filters import threshold_local
-import tensorflow as tf
-from skimage import measure
-import imutils
-
-def sort_cont(character_contours):
-    """
-    To sort contours from left to right
-    """
-    i = 0
-    boundingBoxes = [cv2.boundingRect(c) for c in character_contours]
-    (character_contours, boundingBoxes) = zip(*sorted(zip(character_contours, boundingBoxes),
-                                                      key=lambda b: b[1][i], reverse=False))
-    return character_contours
-
-
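For reference, the left-to-right ordering that the removed sort_cont() produced can be written as a single key-based sort; a minimal sketch, assuming contours obtained from cv2.findContours():

```python
import cv2

def sort_left_to_right(contours):
    # order contours by the x coordinate of their bounding boxes
    return sorted(contours, key=lambda c: cv2.boundingRect(c)[0])
```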
-def segment_chars(plate_img, fixed_width):
-    """
-    Extract the Value channel from the HSV format of the image and apply
-    adaptive thresholding to reveal the characters on the license plate.
-    """
-    V = cv2.split(cv2.cvtColor(plate_img, cv2.COLOR_BGR2HSV))[2]
-
-    T = threshold_local(V, 29, offset=15, method='gaussian')
-
-    thresh = (V > T).astype('uint8') * 255
-
-    thresh = cv2.bitwise_not(thresh)
-
-    # resize the license plate region to a canonical size
-    plate_img = imutils.resize(plate_img, width=fixed_width)
-    thresh = imutils.resize(thresh, width=fixed_width)
-    bgr_thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
-
-    # perform a connected components analysis and initialize the mask to store
-    # the locations of the character candidates
-    labels = measure.label(thresh, neighbors=8, background=0)
-
-    charCandidates = np.zeros(thresh.shape, dtype='uint8')
-
-    # loop over the unique components
-    characters = []
-    for label in np.unique(labels):
-        # if this is the background label, ignore it
-        if label == 0:
-            continue
-        # otherwise, construct the label mask to display only connected
-        # components for the current label, then find contours in the label mask
-        labelMask = np.zeros(thresh.shape, dtype='uint8')
-        labelMask[labels == label] = 255
-
-        cnts = cv2.findContours(labelMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
-
-        # ensure at least one contour was found in the mask
-        if len(cnts) > 0:
-
-            # grab the largest contour, which corresponds to the component in
-            # the mask, then grab the bounding box for the contour
-            c = max(cnts, key=cv2.contourArea)
-            (boxX, boxY, boxW, boxH) = cv2.boundingRect(c)
-
-            # compute the aspect ratio, solidity, and height ratio for the component
-            aspectRatio = boxW / float(boxH)
-            solidity = cv2.contourArea(c) / float(boxW * boxH)
-            heightRatio = boxH / float(plate_img.shape[0])
-
-            # determine if the aspect ratio, solidity, and height of the
-            # contour pass the rule tests
-            keepAspectRatio = aspectRatio < 1.0
-            keepSolidity = solidity > 0.15
-            keepHeight = heightRatio > 0.5 and heightRatio < 0.95
-
-            # check to see if the component passes all the tests
-            if keepAspectRatio and keepSolidity and keepHeight and boxW > 14:
-                # compute the convex hull of the contour and draw it on the
-                # character candidates mask
-                hull = cv2.convexHull(c)
-
-                cv2.drawContours(charCandidates, [hull], -1, 255, -1)
-
-    _, contours, hier = cv2.findContours(charCandidates, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-    if contours:
-        contours = sort_cont(contours)
-        addPixel = 4  # value to be added to each dimension of the character
-        for c in contours:
-            (x, y, w, h) = cv2.boundingRect(c)
-            if y > addPixel:
-                y = y - addPixel
-            else:
-                y = 0
-            if x > addPixel:
-                x = x - addPixel
-            else:
-                x = 0
-            temp = bgr_thresh[y:y + h + (addPixel * 2), x:x + w + (addPixel * 2)]
-
-            characters.append(temp)
-        return characters
-    else:
-        return None
-
-
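The removed segment_chars() hinged on adaptive (local) thresholding of the HSV Value channel. A minimal sketch of just that step, assuming a hypothetical plate.png crop on disk:

```python
import cv2
from skimage.filters import threshold_local

plate_img = cv2.imread('plate.png')  # hypothetical cropped plate image

# the V channel of HSV carries brightness, which separates dark characters from the plate
V = cv2.split(cv2.cvtColor(plate_img, cv2.COLOR_BGR2HSV))[2]

# build a per-pixel threshold map over 29x29 neighbourhoods, then invert so characters turn white
T = threshold_local(V, 29, offset=15, method='gaussian')
thresh = cv2.bitwise_not((V > T).astype('uint8') * 255)
```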
-class PlateFinder:
-    def __init__(self):
-        self.min_area = 4500  # minimum area of the plate
-        self.max_area = 30000  # maximum area of the plate
-
-        self.element_structure = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(22, 3))
-
-    def preprocess(self, input_img):
-        imgBlurred = cv2.GaussianBlur(input_img, (7, 7), 0)  # old window was (5, 5)
-        gray = cv2.cvtColor(imgBlurred, cv2.COLOR_BGR2GRAY)  # convert to gray
-        sobelx = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)  # Sobel in x to get the vertical edges
-        ret2, threshold_img = cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+import cv2
+import imutils
+import sys
+import pytesseract
+import pandas as pd
+import time
 
-        element = self.element_structure
-        morph_n_thresholded_img = threshold_img.copy()
-        cv2.morphologyEx(src=threshold_img, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_n_thresholded_img)
-        return morph_n_thresholded_img
+image = cv2.imread('car.jpeg')
 
-    def extract_contours(self, after_preprocess):
-        _, contours, _ = cv2.findContours(after_preprocess, mode=cv2.RETR_EXTERNAL,
-                                          method=cv2.CHAIN_APPROX_NONE)
-        return contours
+image = imutils.resize(image, width=500)
 
-    def clean_plate(self, plate):
-        gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
-        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
-        _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
+# display the original image
+cv2.imshow("Original Image", image)
 
-        if contours:
-            areas = [cv2.contourArea(c) for c in contours]
-            max_index = np.argmax(areas)  # index of the largest contour in the areas array
+# convert the image to grayscale
+gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+# cv2.imshow("1 - Grayscale Conversion", gray)
 
-            max_cnt = contours[max_index]
-            max_cntArea = areas[max_index]
-            x, y, w, h = cv2.boundingRect(max_cnt)
-            rect = cv2.minAreaRect(max_cnt)
-            rotatedPlate = plate
-            if not self.ratioCheck(max_cntArea, rotatedPlate.shape[1], rotatedPlate.shape[0]):
-                return plate, False, None
-            return rotatedPlate, True, [x, y, w, h]
-        else:
-            return plate, False, None
+# apply a bilateral filter to reduce noise while keeping edges sharp
+gray = cv2.bilateralFilter(gray, 11, 17, 17)
+# cv2.imshow("2 - Bilateral Filter", gray)
 
+# Canny edge detector
+edged = cv2.Canny(gray, 170, 200)
+# cv2.imshow("4 - Canny Edges", edged)
 
+"""
+cv2.findContours() takes three arguments: the source image, the contour
+retrieval mode, and the contour approximation method.
 
-    def check_plate(self, input_img, contour):
-        min_rect = cv2.minAreaRect(contour)
-        if self.validateRatio(min_rect):
-            x, y, w, h = cv2.boundingRect(contour)
-            after_validation_img = input_img[y:y + h, x:x + w]
-            after_clean_plate_img, plateFound, coordinates = self.clean_plate(after_validation_img)
-            if plateFound:
-                characters_on_plate = self.find_characters_on_plate(after_clean_plate_img)
-                if (characters_on_plate is not None and len(characters_on_plate) == 8):
-                    x1, y1, w1, h1 = coordinates
-                    coordinates = x1 + x, y1 + y
-                    after_check_plate_img = after_clean_plate_img
-                    return after_check_plate_img, characters_on_plate, coordinates
-        return None, None, None
+If you pass cv2.CHAIN_APPROX_NONE, all the boundary points are stored. But do
+we actually need all of them? For example, for the contour of a straight line,
+we need only its two end points. This is what cv2.CHAIN_APPROX_SIMPLE does: it
+removes the redundant points and compresses the contour, thereby saving memory.
+"""
 
+cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
 
+# sort the contours by area (cv2.contourArea) and keep only the 30 largest
+cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:30]
+NumberPlateCnt = None
 
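The docstring above is easy to verify. A quick check on a synthetic image (all names here are hypothetical): CHAIN_APPROX_NONE stores every boundary pixel of a filled rectangle, while CHAIN_APPROX_SIMPLE keeps only its four corners:

```python
import cv2
import numpy as np

canvas = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(canvas, (20, 20), (80, 80), 255, -1)  # filled white rectangle

full, _ = cv2.findContours(canvas, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
compressed, _ = cv2.findContours(canvas, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
print(len(full[0]), len(compressed[0]))  # every boundary pixel (~240) vs. just 4 corners
```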
-    def find_possible_plates(self, input_img):
-        """
-        Finding all possible contours that can be plates
+count = 0
+for c in cnts:
     """
-        plates = []
-        self.char_on_plate = []
-        self.corresponding_area = []
-
-        self.after_preprocess = self.preprocess(input_img)
-        possible_plate_contours = self.extract_contours(self.after_preprocess)
-
-        for cnts in possible_plate_contours:
-            plate, characters_on_plate, coordinates = self.check_plate(input_img, cnts)
-            if plate is not None:
-                plates.append(plate)
-                self.char_on_plate.append(characters_on_plate)
-                self.corresponding_area.append(coordinates)
-
-        if (len(plates) > 0):
-            return plates
-        else:
-            return None
-
-    def find_characters_on_plate(self, plate):
-
-        charactersFound = segment_chars(plate, 400)
-        if charactersFound:
-            return charactersFound
-
-    # PLATE FEATURES
-    def ratioCheck(self, area, width, height):
-        min = self.min_area
-        max = self.max_area
-
-        ratioMin = 3
-        ratioMax = 6
-
-        ratio = float(width) / float(height)
-        if ratio < 1:
-            ratio = 1 / ratio
-
-        if (area < min or area > max) or (ratio < ratioMin or ratio > ratioMax):
-            return False
-        return True
-
-    def preRatioCheck(self, area, width, height):
-        min = self.min_area
-        max = self.max_area
-
-        ratioMin = 2.5
-        ratioMax = 7
-
-        ratio = float(width) / float(height)
-        if ratio < 1:
-            ratio = 1 / ratio
-
-        if (area < min or area > max) or (ratio < ratioMin or ratio > ratioMax):
-            return False
-        return True
-
-    def validateRatio(self, rect):
-        (x, y), (width, height), rect_angle = rect
-
-        if (width > height):
-            angle = -rect_angle
-        else:
-            angle = 90 + rect_angle
-
-        if angle > 15:
-            return False
-        if (height == 0 or width == 0):
-            return False
-
-        area = width * height
-        if not self.preRatioCheck(area, width, height):
-            return False
-        else:
-            return True
-
-
-class NeuralNetwork:
-    def __init__(self):
-        self.model_file = "./model/binary_128_0.50_ver3.pb"
-        self.label_file = "./model/binary_128_0.50_labels_ver2.txt"
-        self.label = self.load_label(self.label_file)
-        self.graph = self.load_graph(self.model_file)
-        self.sess = tf.Session(graph=self.graph)
-
-    def load_graph(self, modelFile):
-        graph = tf.Graph()
-        graph_def = tf.GraphDef()
-        with open(modelFile, "rb") as f:
-            graph_def.ParseFromString(f.read())
-        with graph.as_default():
-            tf.import_graph_def(graph_def)
-        return graph
-
-    def load_label(self, labelFile):
-        label = []
-        proto_as_ascii_lines = tf.gfile.GFile(labelFile).readlines()
-        for l in proto_as_ascii_lines:
-            label.append(l.rstrip())
-        return label
-
-    def convert_tensor(self, image, imageSizeOuput):
+    The contour perimeter is also called the arc length. It can be found with
+    the cv2.arcLength() function. Its second argument specifies whether the
+    shape is a closed contour (if True) or just a curve.
     """
-        takes an image and transforms it into a tensor
-        """
-        image = cv2.resize(image, dsize=(imageSizeOuput, imageSizeOuput), interpolation=cv2.INTER_CUBIC)
-        np_image_data = np.asarray(image)
-        np_image_data = cv2.normalize(np_image_data.astype('float'), None, -0.5, .5, cv2.NORM_MINMAX)
-        np_final = np.expand_dims(np_image_data, axis=0)
-        return np_final
-
-    def label_image(self, tensor):
-
-        input_name = "import/input"
-        output_name = "import/final_result"
-
-        input_operation = self.graph.get_operation_by_name(input_name)
-        output_operation = self.graph.get_operation_by_name(output_name)
-
-        results = self.sess.run(output_operation.outputs[0],
-                                {input_operation.outputs[0]: tensor})
-        results = np.squeeze(results)
-        labels = self.label
-        top = results.argsort()[-1:][::-1]
-        return labels[top[0]]
+    peri = cv2.arcLength(c, True)
+    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
+    if len(approx) == 4:  # a contour with four corners is the plate candidate
+        NumberPlateCnt = approx
+        break
 
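The len(approx) == 4 test is the core of the plate search: the Douglas-Peucker approximation in cv2.approxPolyDP() reduces a roughly rectangular outline to its four corners. A small self-contained check (synthetic image, hypothetical names):

```python
import cv2
import numpy as np

canvas = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(canvas, (40, 80), (160, 120), 255, -1)  # plate-shaped blob

cnts, _ = cv2.findContours(canvas, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
peri = cv2.arcLength(cnts[0], True)                   # closed-contour perimeter
approx = cv2.approxPolyDP(cnts[0], 0.02 * peri, True)
print(len(approx))  # 4 -> quadrilateral, so a plate candidate
```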
-    def label_image_list(self, listImages, imageSizeOuput):
-        plate = ""
-        for img in listImages:
-            if cv2.waitKey(25) & 0xFF == ord('q'):
-                break
-            plate = plate + self.label_image(self.convert_tensor(img, imageSizeOuput))
-        return plate, len(plate)
+# mask out everything other than the number plate
+# (assumes a four-point plate contour was found above)
+mask = np.zeros(gray.shape, np.uint8)
+new_image = cv2.drawContours(mask, [NumberPlateCnt], 0, 255, -1)
+new_image = cv2.bitwise_and(image, image, mask=mask)
+cv2.namedWindow("Final_image", cv2.WINDOW_NORMAL)
+cv2.imshow("Final_image", new_image)
 
+# Tesseract configuration: English, LSTM engine (--oem 1), automatic page segmentation (--psm 3)
+config = ('-l eng --oem 1 --psm 3')
 
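On the flags: -l eng selects English, --oem 1 the LSTM engine, and --psm 3 fully automatic page segmentation. Because the plate region is already isolated here, a single-line segmentation mode may fit better; a hedged variant, reusing new_image from the script:

```python
import pytesseract

# --psm 7 treats the input as a single line of text, which often suits cropped plates
plate_config = ('-l eng --oem 1 --psm 7')
text = pytesseract.image_to_string(new_image, config=plate_config)
```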
-if __name__ == "__main__":
-    findPlate = PlateFinder()
+# run Tesseract OCR on the masked image
+text = pytesseract.image_to_string(new_image, config=config)
 
-    # Initialize the Neural Network
-    model = NeuralNetwork()
+# store the recognized text in a CSV file, with a timestamp
+raw_data = {'date': [time.asctime(time.localtime(time.time()))],
+            'v_number': [text]}
 
-    cap = cv2.VideoCapture('test_videos/test.MOV')
-    while (cap.isOpened()):
-        ret, img = cap.read()
-        if ret == True:
-            cv2.imshow('original video', img)
-            if cv2.waitKey(25) & 0xFF == ord('q'):
-                break
-            # cv2.waitKey(0)
-            possible_plates = findPlate.find_possible_plates(img)
-            if possible_plates is not None:
-                for i, p in enumerate(possible_plates):
-                    chars_on_plate = findPlate.char_on_plate[i]
-                    recognized_plate, _ = model.label_image_list(chars_on_plate, imageSizeOuput=128)
-                    print(recognized_plate)
-                    cv2.imshow('plate', p)
-                    if cv2.waitKey(25) & 0xFF == ord('q'):
-                        break
+df = pd.DataFrame(raw_data, columns=['date', 'v_number'])
+df.to_csv('data.csv')
 
+# print the recognized text
+print(text)
 
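Note that df.to_csv('data.csv') overwrites the file on every run. If the intent is to accumulate one row per detection, appending is a common alternative; a minimal sketch, assuming the raw_data dict from above:

```python
import os
import pandas as pd

df = pd.DataFrame(raw_data, columns=['date', 'v_number'])
# append, writing the header only when data.csv does not exist yet
df.to_csv('data.csv', mode='a', header=not os.path.exists('data.csv'), index=False)
```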
-        else:
-            break
-    cap.release()
-    cv2.destroyAllWindows()
+cv2.waitKey(0)
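One small usage note: the script ends at cv2.waitKey(0) without closing the HighGUI windows it opened; a common closing pattern (an addition, not part of this commit) is:

```python
cv2.waitKey(0)
cv2.destroyAllWindows()  # close the "Original Image" and "Final_image" windows
```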