Large diffs are not rendered by default.

Binary file not shown.
@@ -15,7 +15,7 @@ def __init__(self, model_file, scaler_file,
cell_per_step=2,
threshold=4.5,
spatial_size=(32,32)):
self.svc_model = None
self.model = None
self.scaler = None
assert(os.path.exists(model_file))
assert(os.path.exists(scaler_file))
Deleted file not rendered
@@ -4,7 +4,7 @@
import matplotlib.image as mpimg
import numpy as np
from scipy.ndimage.measurements import label

CAR_CLASS_ID = 2
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True, hog_channel=3):
# return with two outputs vis==True
@@ -234,3 +234,57 @@ def draw_bounding_boxes_from_labels(img, labels, area_threshold=2000):
cv2.rectangle(img, bbox[0], bbox[1], (0,255,0), 10)
return img

def getOutputLayersYoloV3(model):
    """Return the names of the network's output (detection) layers.

    The output layers are the ones not connected to any following layer;
    YOLOv3 has several of them, and all must be passed to model.forward()
    so every detection scale is evaluated.

    model: a cv2.dnn_Net instance.
    Returns: list of layer-name strings.
    """
    layer_names = model.getLayerNames()
    output_layers = []
    # getUnconnectedOutLayers() returns 1-based layer indices. OpenCV builds
    # older than 4.5.4 wrap them as [[i], [j], ...] while newer builds return
    # a flat [i, j, ...]; accept both shapes so the code survives upgrades.
    for idx in model.getUnconnectedOutLayers():
        if hasattr(idx, '__len__'):
            idx = idx[0]
        output_layers.append(layer_names[int(idx) - 1])
    return output_layers

def process_each_frame_yolov3(frame, model, ystart, ystop, confidence_threshold, heat_map, width=416, height=416):
    """Run YOLOv3 on the [ystart:ystop] horizontal band of `frame` and
    accumulate car detections into `heat_map`.

    Parameters:
        frame: full BGR image (H, W, 3) as read by cv2.
        model: cv2.dnn_Net loaded from Darknet cfg/weights.
        ystart, ystop: row range of the band fed to the detector.
        confidence_threshold: minimum class score to keep a detection.
        heat_map: (H, W) accumulator; +1 added inside each detected car box.
        width, height: network input resolution for blobFromImage.

    Returns:
        The updated heat_map.
    """
    # Scale pixels to [0,1] and resize to the network input size; swapRB=1
    # because cv2 frames are BGR while Darknet models expect RGB.
    blob = cv2.dnn.blobFromImage(frame[ystart:ystop, :], 1 / 255,
                                 (width, height), [0, 0, 0], 1, crop=False)
    model.setInput(blob)
    # YOLOv3 has multiple detection layers; forward through all of them.
    results = model.forward(getOutputLayersYoloV3(model))

    # Each detection row is [cx, cy, w, h, objectness, class scores...],
    # with box coordinates normalized to the cropped band.
    frame_width = frame.shape[1]
    frame_height = ystop - ystart
    for result in results:
        for detection in result:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # Keep only cars (module constant, COCO index) above threshold.
            if class_id != CAR_CLASS_ID or confidence < confidence_threshold:
                continue
            # Rescale the normalized box to pixel coordinates and shift the
            # vertical axis back into full-frame space by ystart.
            center_x = int(detection[0] * frame_width)
            center_y = int(detection[1] * frame_height)
            box_width = int(detection[2] * frame_width)
            box_height = int(detection[3] * frame_height)
            # Clamp the top-left corner at 0: a negative value would be
            # treated as a wrap-around index by the numpy slice below and
            # heat up the wrong region of the map.
            x1 = max(0, int(center_x - box_width / 2))
            y1 = max(0, int(center_y - box_height / 2) + ystart)
            x2 = int(center_x + box_width / 2)
            y2 = int(center_y + box_height / 2) + ystart
            heat_map[y1:y2, x1:x2] += 1
    return heat_map




@@ -0,0 +1,81 @@
from vehicle_detection_utils import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
import cv2
import pickle
from scipy.ndimage.measurements import label
from queue import Queue
# Class index used to restrict detections to cars.
# NOTE(review): vehicle_detection_utils defines CAR_CLASS_ID = 2 (COCO 'car');
# 3 would be 'motorbike' in COCO — confirm which index the loaded model uses.
CAR_CLASS_ID = 3
# Default minimum score below which YOLO detections are discarded.
CONFIDENCE_THRESHOLD = 0.5
class YoloV3VehicleDetectionPipeline(object):
    """Vehicle detection on a video using OpenCV's DNN module with YOLOv3.

    Each frame is run through the network; car detections are accumulated
    into a heat map, which is thresholded and labelled (scipy.ndimage) to
    draw one bounding box per connected region.
    """

    def __init__(self,
                 model_file,
                 weights_file,
                 yolo_score_threshold=0.5,
                 heat_map_threshold=0.5
                 ):
        """Load the Darknet model and remember the thresholds.

        model_file: path to the YOLOv3 .cfg file.
        weights_file: path to the matching .weights file.
        yolo_score_threshold: minimum class score to keep a detection.
        heat_map_threshold: heat-map cells at or below this value are
            zeroed before connected-component labelling.
        """
        # Fail fast on missing files — readNetFromDarknet's own errors
        # are much harder to diagnose.
        assert(os.path.exists(model_file))
        assert(os.path.exists(weights_file))
        self.model = cv2.dnn.readNetFromDarknet(model_file, weights_file)
        self.model.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        self.model.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        self.yolo_score_threshold = yolo_score_threshold
        self.heat_map_threshold = heat_map_threshold

    def run(self, video_file, save_video=False, debug=True):
        """Process `video_file` frame by frame, drawing vehicle boxes.

        video_file: path to the input video.
        save_video: when True, write the annotated frames to
            'project_output_yolo.avi' (MJPG, 20 fps, 1280x720).
        debug: when True, show live windows; press 'q' to quit early.
        """
        assert os.path.exists(video_file)
        print ("Start Video Processing")
        # open the video and feed each frame through the pipeline
        cap = cv2.VideoCapture(video_file)
        out = None
        if save_video:
            codec = cv2.VideoWriter_fourcc(*'MJPG')
            out = cv2.VideoWriter('project_output_yolo.avi', codec, 20.0, (1280, 720))
        frame_count = 0
        # set skip > 0 during debugging to fast-forward past early frames
        skip = 0
        while cap.isOpened():
            ret, orig_frame = cap.read()
            # Check ret BEFORE the skip test: otherwise an exhausted video
            # would loop forever while frame_count < skip, since read()
            # keeps returning (False, None) without ending the loop.
            if not ret:
                break
            frame_count += 1
            if frame_count < skip:
                continue
            heat_map = np.zeros((orig_frame.shape[0], orig_frame.shape[1]))
            # Only scan the road band (rows 300-620) to save compute.
            heat_map = process_each_frame_yolov3(orig_frame,
                                                 self.model,
                                                 300, 620,
                                                 self.yolo_score_threshold,
                                                 heat_map)
            if heat_map is not None:
                if debug:
                    cv2.imshow('heatmap', heat_map)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                # Suppress weak responses before labelling. Previously the
                # threshold was stored in __init__ but never applied, so
                # single spurious detections produced boxes.
                heat_map[heat_map <= self.heat_map_threshold] = 0
                labels = label(heat_map)
                draw_bounding_boxes_from_labels(orig_frame, labels)

            if save_video:
                out.write(orig_frame)
            # show the annotated frame
            if debug:
                cv2.imshow('Frame', orig_frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        if save_video:
            out.release()
        print ("Clean up and Close")
        cap.release()

if __name__ == "__main__":
    # Model files are expected one directory above this script.
    cfg_path = '../yolov3_models/yolov3_320.cfg'
    weights_path = '../yolov3_models/yolov3_320.weights'
    detector = YoloV3VehicleDetectionPipeline(model_file=cfg_path,
                                              weights_file=weights_path)
    detector.run('../project_video.mp4', save_video=True, debug=False)