Commit: real-time-object-detection-system
YassineOuhadi committed Jun 10, 2023
0 parents, commit 0d531a9
Showing 31 changed files with 64,730 additions and 0 deletions.
Binary file added flask-api/aerial-airport.onnx
Binary file not shown.
150 changes: 150 additions & 0 deletions flask-api/app.py
@@ -0,0 +1,150 @@
from flask import Flask, request, jsonify, send_file
import os
import time
import datetime
from ultralytics import YOLO
from core import annotate
import ast


# Load the YOLO model (ONNX export); alternative weights are kept commented out
# model = YOLO('yolov8n.pt')
# model = YOLO('aerial-airport.onnx')
model = YOLO('coco.onnx')


app = Flask(__name__)

# List of detectable objects, populated by /getallobjects
objects = []

@app.route("/", methods=['GET', 'POST'])
def home():
if request.method == 'POST':
# Check if a file is present in the request
if 'file' not in request.files:
return "No file uploaded"

file = request.files['file']
objects = ast.literal_eval(request.form.getlist('objects')[0]) # Get the list of objects from the request
print("Received objects:", objects)
# Check if the file is empty
if file.filename == '':
return "Empty file uploaded"

        # Save the file to a temporary location, prefixed with a per-request timestamp
        current_datetime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        uploadedFileName = f"{current_datetime}_{file.filename}"
        file_path = os.path.join('incoming', uploadedFileName)
        file.save(file_path)

        ret_file_path = annotate(model, file_path, file.filename, objects)

        # Delay for 5 seconds before returning the file
        time.sleep(5)

# Return the processed file to the user
return send_file(ret_file_path, as_attachment=True)


return "Hello World!"

@app.route("/getallobjects", methods=['GET'])
def get_all_objects():
    global objects

    # Alternative object list (e.g. for the aerial-airport model):
    # objects = [
    #     {'id': 0, 'name': 'plane'}
    # ]

    objects = [
{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'bicycle'},
{'id': 2, 'name': 'car'},
{'id': 3, 'name': 'motorcycle'},
{'id': 4, 'name': 'airplane'},
{'id': 5, 'name': 'bus'},
{'id': 6, 'name': 'train'},
{'id': 7, 'name': 'truck'},
{'id': 8, 'name': 'boat'},
{'id': 9, 'name': 'traffic light'},
{'id': 10, 'name': 'fire hydrant'},
{'id': 11, 'name': 'stop sign'},
{'id': 12, 'name': 'parking meter'},
{'id': 13, 'name': 'bench'},
{'id': 14, 'name': 'bird'},
{'id': 15, 'name': 'cat'},
{'id': 16, 'name': 'dog'},
{'id': 17, 'name': 'horse'},
{'id': 18, 'name': 'sheep'},
{'id': 19, 'name': 'cow'},
{'id': 20, 'name': 'elephant'},
{'id': 21, 'name': 'bear'},
{'id': 22, 'name': 'zebra'},
{'id': 23, 'name': 'giraffe'},
{'id': 24, 'name': 'backpack'},
{'id': 25, 'name': 'umbrella'},
{'id': 26, 'name': 'handbag'},
{'id': 27, 'name': 'tie'},
{'id': 28, 'name': 'suitcase'},
{'id': 29, 'name': 'frisbee'},
{'id': 30, 'name': 'skis'},
{'id': 31, 'name': 'snowboard'},
{'id': 32, 'name': 'sports ball'},
{'id': 33, 'name': 'kite'},
{'id': 34, 'name': 'baseball bat'},
{'id': 35, 'name': 'baseball glove'},
{'id': 36, 'name': 'skateboard'},
{'id': 37, 'name': 'surfboard'},
{'id': 38, 'name': 'tennis racket'},
{'id': 39, 'name': 'bottle'},
{'id': 40, 'name': 'wine glass'},
{'id': 41, 'name': 'cup'},
{'id': 42, 'name': 'fork'},
{'id': 43, 'name': 'knife'},
{'id': 44, 'name': 'spoon'},
{'id': 45, 'name': 'bowl'},
{'id': 46, 'name': 'banana'},
{'id': 47, 'name': 'apple'},
{'id': 48, 'name': 'sandwich'},
{'id': 49, 'name': 'orange'},
{'id': 50, 'name': 'broccoli'},
{'id': 51, 'name': 'carrot'},
{'id': 52, 'name': 'hot dog'},
{'id': 53, 'name': 'pizza'},
{'id': 54, 'name': 'donut'},
{'id': 55, 'name': 'cake'},
{'id': 56, 'name': 'chair'},
{'id': 57, 'name': 'couch'},
{'id': 58, 'name': 'potted plant'},
{'id': 59, 'name': 'bed'},
{'id': 60, 'name': 'dining table'},
{'id': 61, 'name': 'toilet'},
{'id': 62, 'name': 'tv'},
{'id': 63, 'name': 'laptop'},
{'id': 64, 'name': 'mouse'},
{'id': 65, 'name': 'remote'},
{'id': 66, 'name': 'keyboard'},
{'id': 67, 'name': 'cell phone'},
{'id': 68, 'name': 'microwave'},
{'id': 69, 'name': 'oven'},
{'id': 70, 'name': 'toaster'},
{'id': 71, 'name': 'sink'},
{'id': 72, 'name': 'refrigerator'},
{'id': 73, 'name': 'book'},
{'id': 74, 'name': 'clock'},
{'id': 75, 'name': 'vase'},
{'id': 76, 'name': 'scissors'},
{'id': 77, 'name': 'teddy bear'},
{'id': 78, 'name': 'hair drier'},
{'id': 79, 'name': 'toothbrush'}
]

# Return the list of objects as JSON response
return jsonify(objects)

if __name__ == "__main__":
app.run()
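
A minimal client sketch (not part of this commit) showing how the two routes in app.py above can be exercised; the host/port, the sample video name and the selected class IDs are assumptions:

# client_example.py - hypothetical client for the Flask API above (assumes it runs on http://localhost:5000)
import requests

BASE_URL = "http://localhost:5000"  # assumed default Flask address

# 1. Fetch the available classes from /getallobjects
available = requests.get(f"{BASE_URL}/getallobjects").json()
print(available[:3])  # e.g. person, bicycle, car

# 2. Upload a file with the class IDs to keep; the server parses the
#    'objects' form field with ast.literal_eval, so a list literal string works
with open("sample.mp4", "rb") as f:  # sample.mp4 is a placeholder file
    response = requests.post(
        f"{BASE_URL}/",
        files={"file": f},
        data={"objects": "[2, 7]"},  # 2 = car, 7 = truck in the list above
    )

# 3. Save the annotated file returned by send_file
with open("annotated_sample.mp4", "wb") as out:
    out.write(response.content)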
Binary file added flask-api/coco.onnx
Binary file not shown.
122 changes: 122 additions & 0 deletions flask-api/core.py
@@ -0,0 +1,122 @@
import numpy as np
import random
from ultralytics import YOLO
from deep_sort.deep_sort.tracker import Tracker
from deep_sort.deep_sort.nn_matching import NearestNeighborDistanceMetric
from deep_sort.tools.generate_detections import create_box_encoder
from deep_sort.deep_sort.detection import Detection
import cv2
import os
import datetime


def annotate_vid(model, file_path, filename, objects):
    # Initialize DeepSORT's tracker with a cosine appearance metric
    similarity_metric = NearestNeighborDistanceMetric("cosine", 0.4, None)
    tracker = Tracker(similarity_metric)

    # Appearance feature extractor used to build DeepSORT detections
    feature_extractor = create_box_encoder("./deep_sort/mars-small128.pb", batch_size=1)
    color_tracks = {}  # track_id -> drawing color

    # Open the input video and read the first frame
    cap = cv2.VideoCapture(file_path)
    ret, frame = cap.read()

current_datetime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
detectedFileName = f"{current_datetime}_{filename}"
inter_file_path = "outcoming/inter_" + detectedFileName
ret_file_path = "outcoming/" + detectedFileName

inter_file = open(inter_file_path, "x")
inter_file.close()
cap_out = cv2.VideoWriter(inter_file_path, cv2.VideoWriter_fourcc(*'mp4v'), cap.get(cv2.CAP_PROP_FPS),
(frame.shape[1], frame.shape[0]))

# Getting the YOLO results of the first frame
resultss = model(frame)

while ret:
for results in resultss:

# Generating a list of Detection(s) for the current frame
            bboxes, scores = [], []
for bbox_wrapper in results.boxes.data.tolist():
min_x, min_y, max_x, max_y, score, class_id = bbox_wrapper
for id in objects:
if class_id == int(id): # Filter detections by class ID
tlwh = (min_x, min_y, max_x - min_x, max_y - min_y)
bboxes.append(np.asarray(tlwh))
scores.append(score)
features = feature_extractor(frame, bboxes)

detections = []
for k in range(len(bboxes)):
detections.append(Detection(bboxes[k], scores[k], features[k]))

# Feeding the Detection(s) to DeepSort's Tracker
tracker.predict()
tracker.update(detections)

# Adding the assigned (colored) detections to the frame
for track in tracker.tracks:
if track.track_id not in color_tracks:
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
while color in color_tracks.values():
color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
color_tracks[track.track_id] = color

                # Draw the tracked box in the track's assigned color
                tlbr = track.to_tlbr()
                cv2.rectangle(frame, (int(tlbr[0]), int(tlbr[1])),
                              (int(tlbr[2]), int(tlbr[3])), color_tracks[track.track_id], 3)
# print(track.to_tlbr())

# Adding the updated frame to the output video
cap_out.write(frame)

        # Getting the next frame; only run the model if a frame was actually read
        ret, frame = cap.read()
        if ret:
            resultss = model(frame)

cap.release()
cap_out.release()

os.system("ffmpeg -i {} -c:v libx264 -crf 17 -b:v 6000k -maxrate 6000K -bufsize 4M -movflags -faststart -preset veryfast -dn {}".format(inter_file_path, ret_file_path))
os.remove(inter_file_path)

return ret_file_path


def annotate_img(model, file_path, filename, objects):
image = cv2.imread(file_path)

resultss = model(file_path)
for result in resultss:
for box_wrapper in result.boxes.data.tolist():
min_x, min_y, max_x, max_y, score, class_id = box_wrapper

            for id in objects:
                if class_id == int(id):  # Filter detections by class ID
                    color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                    cv2.rectangle(image, (int(min_x), int(min_y)), (int(max_x), int(max_y)), color, 3)

current_datetime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
detectedFileName = f"{current_datetime}_{filename}"
ret_file_path = "outcoming/" + detectedFileName

ret_file = open(ret_file_path, "x")
ret_file.close()
cv2.imwrite(ret_file_path, image)

return ret_file_path


def annotate(model, file_path, filename, objects):

    supported_image_exts = {'jpg', 'jpeg', 'png', 'gif', 'bmp', 'webp'}

    # Dispatch on the file extension: supported images go to annotate_img,
    # everything else is treated as a video
    ext = filename.rsplit('.', 1)[-1].lower()
    if ext in supported_image_exts:
        return annotate_img(model, file_path, filename, objects)

    return annotate_vid(model, file_path, filename, objects)
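
For completeness, a small standalone sketch (not part of this commit) of how core.annotate can be driven without the Flask layer; the weight file and sample paths below are placeholders:

# run_annotate.py - hypothetical direct use of core.annotate
from ultralytics import YOLO
from core import annotate

model = YOLO('coco.onnx')  # same ONNX weights that app.py loads

# Keep only class IDs 2 (car) and 5 (bus); IDs follow the /getallobjects mapping.
# 'incoming/demo.mp4' is a placeholder input; the annotated result is written under outcoming/.
out_path = annotate(model, 'incoming/demo.mp4', 'demo.mp4', [2, 5])
print('Annotated file written to', out_path)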