-
Notifications
You must be signed in to change notification settings - Fork 1
/
CarDetection.py
92 lines (78 loc) · 2.99 KB
/
CarDetection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
# Import libraries of python opencv.
# BUG FIX: this line was missing its leading '#' in the original and was a
# SyntaxError ("import libraries of python opencv").
from skimage.measure import compare_ssim  # NOTE(review): deprecated API; newer scikit-image moved it to skimage.metrics.structural_similarity
import argparse
import imutils
import cv2
import numpy as np
import time
from firebase import firebase

# Firebase connection used to publish the computed traffic-density value.
firebase = firebase.FirebaseApplication('https://traffic-managment.firebaseio.com/', None)

# Create a VideoCapture object reading from the local video file.
cap = cv2.VideoCapture('cars.mp4')

# Trained Haar-cascade car classifier (disabled in the original; kept as a pointer).
#car_cascade = cv2.CascadeClassifier('cars.xml')
count = 0

# t1 marks the start of the current 5-second sampling window.
t1 = time.time()
while True:
    # Capture frame by frame.
    ret, frame = cap.read()
    # BUG FIX: the original never checked `ret`. When the video ends (or the
    # file fails to open), `frame` is None and cv2.imshow raises. Exit cleanly.
    if not ret:
        break

    # Display the current frame.
    cv2.imshow('video', frame)

    t = time.time()
    if t > t1 + 5:
        # Every 5 seconds: snapshot the current frame and compare it against a
        # reference image to estimate traffic density via SSIM.
        print("**************************************")
        cv2.imwrite("card.jpg", frame)
        # NOTE(review): the snapshot above is written to the working directory,
        # but the reads below use hard-coded absolute paths under
        # C:\Users\Punit\Downloads — confirm these refer to the same files.
        imageA = cv2.imread("C:\\Users\\Punit\\Downloads\\card2.jpg")
        imageB = cv2.imread("C:\\Users\\Punit\\Downloads\\card.jpg")
        if imageA is None or imageB is None:
            # BUG FIX: cv2.imread returns None for a missing/unreadable file;
            # the original crashed inside cvtColor. Skip this sample instead.
            print("reference image(s) missing; skipping density sample")
            t1 = time.time()
        else:
            # Convert both images to grayscale for SSIM comparison.
            grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
            grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
            # Compute the Structural Similarity Index (SSIM); full=True also
            # returns the per-pixel difference image.
            (score, diff) = compare_ssim(grayA, grayB, full=True)
            diff = (diff * 255).astype("uint8")
            print("SSIM: {}".format(score))
            # Density heuristic: identical images (score 1.0) -> 0 density.
            data = 100 - score * 100
            result = firebase.put('Density', 'Lane1', data)
            # Threshold the difference image, then find contours to obtain the
            # regions where the two input images differ.
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            # Restart the 5-second sampling window.
            t1 = time.time()

    # Press Q on the keyboard to exit.
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# Release the VideoCapture object and close all OpenCV windows.
cap.release()
cv2.destroyAllWindows()