-
Notifications
You must be signed in to change notification settings - Fork 2
/
main.py
122 lines (95 loc) · 4.03 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
from moviepy.editor import VideoFileClip
def undistort_img():
    """Calibrate the camera from chessboard images and pickle the result.

    Scans the calibration directory for chessboard photos, detects the
    9x6 inner-corner grid in each, runs cv2.calibrateCamera over all
    detections, and saves the camera matrix and distortion coefficients
    to cal_pickle.p for later reuse by undistort().

    Raises:
        RuntimeError: if no calibration images / chessboards are found.
    """
    # One set of object points (0,0,0) ... (8,5,0) for a 9x6 inner-corner
    # board; the same planar grid applies to every calibration image.
    obj_pts = np.zeros((6*9, 3), np.float32)
    obj_pts[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    # 3-D board-space points and the matching 2-D corner detections.
    objpoints = []
    imgpoints = []
    # BUG FIX: the original glob named the directory itself (no wildcard)
    # and used unescaped backslashes, so it never matched any image files.
    images = glob.glob(r'E:\Document folder\Curved-Lane-Lines-master\camera_cal\*.jpg')
    # NOTE(review): the calibration images live on E:\ but the pickle below
    # is written under C:\ — looks inconsistent; confirm the intended paths.
    img = None
    for indx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        if ret:
            objpoints.append(obj_pts)
            imgpoints.append(corners)
    # BUG FIX: with an empty glob the original crashed with NameError on
    # `img`; fail loudly with an actionable message instead.
    if img is None or not objpoints:
        raise RuntimeError('No calibration chessboards found; check the camera_cal image path.')
    img_size = (img.shape[1], img.shape[0])
    # Calibrate using every detected board at once.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
    # Sanity-check undistortion on the last image read.
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    # Persist the calibration so undistort() can load it without redoing
    # the (slow) chessboard search. `with` closes the handle (the original
    # leaked an open file from pickle.dump(..., open(...))).
    dist_pickle = {'mtx': mtx, 'dist': dist}
    with open(r'C:\Users\Atif Traders\Documents\Curved-Lane-Lines-master\camera_cal\cal_pickle.p', 'wb') as f:
        pickle.dump(dist_pickle, f)
def undistort(img, cal_dir=r'C:\Users\Atif Traders\Documents\Curved-Lane-Lines-master\camera_cal\cal_pickle.p'):
    """Undistort *img* using the pickled camera calibration at *cal_dir*.

    The pickle is expected to hold the 'mtx' (camera matrix) and 'dist'
    (distortion coefficients) produced by undistort_img().
    """
    with open(cal_dir, mode='rb') as f:
        calibration = pickle.load(f)
    camera_matrix = calibration['mtx']
    dist_coeffs = calibration['dist']
    return cv2.undistort(img, camera_matrix, dist_coeffs, None, camera_matrix)
def pipeline(img, s_thresh=(100, 255), sx_thresh=(15, 255)):
    """Build a binary lane mask from colour and gradient thresholds.

    Undistorts the frame, converts it to HLS, then ORs together two
    binary masks: one from thresholding the scaled Sobel gradient of the
    L channel (sx_thresh) and one from thresholding the S channel
    directly (s_thresh).

    Args:
        img: RGB input frame.
        s_thresh: (low, high) inclusive threshold on the S channel.
        sx_thresh: (low, high) inclusive threshold on the scaled gradient.

    Returns:
        2-D uint8-like array of 0/1 lane-candidate pixels.
    """
    img = undistort(img)
    img = np.copy(img)
    # BUG FIX: np.float was removed in NumPy 1.24; np.float64 is the
    # concrete dtype it aliased, so behavior is unchanged.
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # NOTE(review): dx=1, dy=1 computes a mixed second derivative even
    # though the variable is named sobelx; an x-only gradient would be
    # cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) — confirm intent before changing.
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 1)
    abs_sobelx = np.absolute(sobelx)
    # Normalize the gradient to 0-255 so the thresholds are image-independent.
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # A pixel is a lane candidate if EITHER mask fires.
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    return combined_binary
def perspective_warp(img, dst_size=(1280,720),
                     src=np.float32([(0.43,0.65),(0.58,0.65),(0.1,1),(1,1)]),
                     dst=np.float32([(0,0), (1, 0), (0,1), (1,1)])):
    """Warp *img* to a bird's-eye view.

    *src* and *dst* are corner quads expressed as fractions of the input
    image size and of *dst_size* respectively; they are scaled to pixels
    before computing the perspective transform.
    """
    height, width = img.shape[0], img.shape[1]
    # Scale the fractional quads into pixel coordinates.
    src_px = src * np.float32([(width, height)])
    dst_px = dst * np.float32(dst_size)
    transform = cv2.getPerspectiveTransform(src_px, dst_px)
    return cv2.warpPerspective(img, transform, dst_size)
def inv_perspective_warp(img, dst_size=(1280,720),
                         src=np.float32([(0,0), (1, 0), (0,1), (1,1)]),
                         dst=np.float32([(0.43,0.65),(0.58,0.65),(0.1,1),(1,1)])):
    """Warp a bird's-eye image back to the original camera perspective.

    Mirror of perspective_warp: the default *src*/*dst* quads are swapped,
    so this undoes the forward warp. Quads are fractions of the input
    image size and of *dst_size*, scaled to pixels before the transform.
    """
    height, width = img.shape[0], img.shape[1]
    src_px = src * np.float32([(width, height)])
    dst_px = dst * np.float32(dst_size)
    transform = cv2.getPerspectiveTransform(src_px, dst_px)
    return cv2.warpPerspective(img, transform, dst_size)
def get_hist(img):
    """Return the column-wise pixel sum of the bottom half of *img*.

    Used on a binary lane mask: the two peaks of the returned 1-D
    histogram indicate the likely x-positions of the lane lines.
    """
    bottom_half = img[img.shape[0] // 2:, :]
    return np.sum(bottom_half, axis=0)
def sliding_window(img, nwindows=9, margin=150, minpix=1, draw_windows=True):
    """Locate lane-line pixels with a sliding-window search.

    Placeholder — NOT IMPLEMENTED: currently returns None. Parameters
    suggest the intended algorithm (nwindows stacked search windows of
    half-width `margin`, recentred when at least `minpix` pixels are
    found; `draw_windows` toggles debug drawing), but none of that is
    implemented here yet.
    """
    # Implementation of sliding window algorithm
    pass
def get_curve(img, leftx, rightx):
    """Compute lane curvature and vehicle offset from fitted lane pixels.

    Placeholder — NOT IMPLEMENTED: currently returns None. Intended to
    take the image plus left/right lane x-positions and return curvature
    radii and the car's lateral offset (per the comment below).
    """
    # Calculate curvature and vehicle offset
    pass
def draw_lanes(img, left_fit, right_fit):
    """Overlay the detected lane area on *img* from the two lane fits.

    Placeholder — NOT IMPLEMENTED: currently returns None.
    """
    # Draw lane lines on the image
    pass
def vid_pipeline(img):
    """Per-frame processing entry point passed to moviepy's fl_image.

    Placeholder — NOT IMPLEMENTED: currently returns None, so the video
    export below will fail until this produces a frame.
    """
    # Video processing pipeline
    pass
# Guard the side-effecting script body so importing this module (e.g. to
# reuse pipeline()/perspective_warp()) no longer triggers a full camera
# calibration and video encode.
if __name__ == '__main__':
    # One-time camera calibration; writes cal_pickle.p used by undistort().
    undistort_img()
    # Run every frame of the project video through vid_pipeline and
    # write the annotated result (video only, no audio track).
    myclip = VideoFileClip(r'C:\Users\Atif Traders\Documents\Curved-Lane-Lines-master\project_video.mp4')
    output_vid = 'output.mp4'
    clip = myclip.fl_image(vid_pipeline)
    clip.write_videofile(output_vid, audio=False)