Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Multiprocessing with KNN displaying no output #937

Open
kumarsatyamdhiman opened this issue Sep 22, 2019 · 0 comments
Open

Multiprocessing with KNN displaying no output #937

kumarsatyamdhiman opened this issue Sep 22, 2019 · 0 comments

Comments

@kumarsatyamdhiman
Copy link

kumarsatyamdhiman commented Sep 22, 2019

  • face_recognition version:
  • Python version: 3.
  • Operating System: Ubuntu 18.

Description

Hi all, I am trying to use the KNN classifier in facerec_from_webcam_multiprocessing.py instead of passing encodings like

Global.known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
Global.known_face_names = [
"Barack Obama",
"Joe Biden"
]

since the KNN classifier gives better accuracy.

######################### the Code ###################################
import face_recognition
import cv2
from multiprocessing import Process, Manager, cpu_count, set_start_method
import time
import threading
import platform
import numpy as np
import pickle
import subprocess
import math

# Get next worker's id
def next_id(current_id, worker_num):
    """Return the id of the worker after *current_id*, wrapping from worker_num back to 1."""
    return 1 if current_id == worker_num else current_id + 1

# Get previous worker's id
def prev_id(current_id, worker_num):
    """Return the id of the worker before *current_id*, wrapping from 1 back to worker_num."""
    return worker_num if current_id == 1 else current_id - 1

# A thread used to capture frames.
def capture(read_frame_list, Global, worker_num):
    """Continuously read frames from the camera into the shared frame buffer.

    Args:
        read_frame_list: Manager dict mapping buffer slot id -> frame (BGR ndarray).
        Global: Manager namespace with the shared ring-buffer counters
            (buff_num, read_num) and the is_exit flag.
        worker_num: number of worker processes (ring-buffer size).
    """
    # Get a reference to the camera (RTSP stream here; use 0 for the default webcam)
    video_capture = cv2.VideoCapture("rtsp://192.168.0.111/live/ch00_0")
    # video_capture.set(3, 640)  # Width of the frames in the video stream.
    # video_capture.set(4, 480)  # Height of the frames in the video stream.
    # video_capture.set(5, 30)   # Frame rate.
    print("Width: %d, Height: %d, FPS: %d" % (video_capture.get(3), video_capture.get(4), video_capture.get(5)))

    while not Global.is_exit:
        # Only read when the next buffer slot is free (the reader hasn't caught up)
        if Global.buff_num != next_id(Global.read_num, worker_num):
            # Grab a single frame of video
            ret, frame = video_capture.read()
            # BUG FIX: the original ignored `ret`, so a failed read stored a
            # None frame that crashes the workers downstream. Skip bad reads.
            if not ret:
                time.sleep(0.01)
                continue
            read_frame_list[Global.buff_num] = frame
            Global.buff_num = next_id(Global.buff_num, worker_num)
        else:
            time.sleep(0.01)

    # Release webcam
    video_capture.release()

# Helper: run KNN face recognition on one RGB frame.
def _predict_knn(rgb_frame, knn_clf=None, model_path=None, distance_threshold=0.45):
    """Detect faces in *rgb_frame* and classify each one with a KNN model.

    Args:
        rgb_frame: RGB image (H, W, 3) ndarray.
        knn_clf: an already-loaded sklearn KNeighborsClassifier, or None.
        model_path: path to a pickled classifier, used only when knn_clf is None.
        distance_threshold: max neighbor distance to accept a match.

    Returns:
        List of (name, (top, right, bottom, left)) tuples; name is "unknown"
        when the closest neighbor is farther than distance_threshold.

    Raises:
        Exception: if neither knn_clf nor model_path is supplied.
    """
    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either through knn_clf or model_path")

    # Load a trained KNN model (if one was passed in by path)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    # Find face locations in the frame
    X_face_locations = face_recognition.face_locations(rgb_frame)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the frame
    faces_encodings = face_recognition.face_encodings(rgb_frame, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for each face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=5)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and drop classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc)
            for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]


# Many subprocesses used to process frames.
def process(worker_id, read_frame_list, write_frame_list, Global, worker_num):
    """Worker subprocess: take frames from the ring buffer, label faces, pass them on.

    Args:
        worker_id: this worker's 1-based id in the ring.
        read_frame_list / write_frame_list: Manager dicts shared with the
            capture thread and the display loop.
        Global: Manager namespace with ring counters and flags.
        worker_num: total number of workers.
    """
    # Load the trained KNN classifier ONCE per worker. The original reloaded
    # (and unpickled) the model from disk for every single frame.
    with open("trained_knn_model.clf", 'rb') as f:
        knn_clf = pickle.load(f)

    while not Global.is_exit:

        # Wait until it is this worker's turn AND a fresh frame is buffered
        while Global.read_num != worker_id or Global.read_num != prev_id(Global.buff_num, worker_num):
            time.sleep(0.01)

        # Delay to make the video look smoother
        time.sleep(Global.frame_delay)

        # Read a single frame from the frame list
        Global.frame_process = read_frame_list[worker_id]
        frame_process = read_frame_list[worker_id]

        # Expect next worker to read a frame
        Global.read_num = next_id(Global.read_num, worker_num)

        # Convert BGR (OpenCV) to RGB (face_recognition). Copy into a
        # contiguous array: the [::-1] slice yields a negative-stride view
        # that dlib cannot always consume.
        rgb_frame = np.ascontiguousarray(frame_process[:, :, ::-1])

        # BUG FIX: the original wrapped this call in `while True:`, an
        # infinite loop that never let the worker reach the write stage —
        # which is why no output was ever displayed. Predict once per frame.
        predictions = _predict_knn(rgb_frame, knn_clf=knn_clf)

        # Draw labels on the BGR frame that is actually displayed.
        # (The original drew on the throwaway RGB view, so labels never
        # appeared; the `*= 1` location scaling was a no-op and is removed.)
        for name, (top, right, bottom, left) in predictions:
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame_process, name, (left + 6, bottom - 2), font, 0.5, (255, 255, 0), 1)

        # Wait to write
        while Global.write_num != worker_id:
            time.sleep(0.01)

        # Send frame to global
        write_frame_list[worker_id] = frame_process

        # Expect next worker to write a frame
        Global.write_num = next_id(Global.write_num, worker_num)

if __name__ == '__main__':  # BUG FIX: was `if name == 'main':`, which raises NameError and never runs

    # Fix fork-related crash on MacOS
    if platform.system() == 'Darwin':
        set_start_method('forkserver')

    # Shared state between the capture thread, the workers, and this display loop
    Global = Manager().Namespace()
    Global.buff_num = 1   # next buffer slot the capture thread fills
    Global.read_num = 1   # id of the worker allowed to read next
    Global.write_num = 1  # id of the worker allowed to write next
    Global.frame_delay = 0
    Global.is_exit = False
    read_frame_list = Manager().dict()
    write_frame_list = Manager().dict()

    # Number of workers (subprocesses used to process frames);
    # keep one core free for capturing frames.
    if cpu_count() > 2:
        worker_num = cpu_count() - 1
    else:
        worker_num = 2

    # Subprocess list
    p = []

    # Create a thread to capture frames (a subprocess would crash on Mac)
    p.append(threading.Thread(target=capture, args=(read_frame_list, Global, worker_num)))
    p[0].start()

    # Create workers
    for worker_id in range(1, worker_num + 1):
        p.append(Process(target=process, args=(worker_id, read_frame_list, write_frame_list, Global, worker_num)))
        p[worker_id].start()

    # Start to show video
    last_num = 1
    fps_list = []
    tmp_time = time.time()
    while not Global.is_exit:
        while Global.write_num != last_num:
            last_num = int(Global.write_num)

            # Calculate fps over a sliding window of recent frame delays
            delay = time.time() - tmp_time
            tmp_time = time.time()
            fps_list.append(delay)
            if len(fps_list) > 5 * worker_num:
                fps_list.pop(0)
            # BUG FIX: was `numpy.sum`, but numpy is imported as `np` -> NameError
            fps = len(fps_list) / np.sum(fps_list)
            print("fps: %.2f" % fps)

            # Calculate frame delay to make the video look smoother.
            # Higher fps -> smaller ratio, or fps gets capped at a lower value.
            # Larger ratio: smoother video but fps struggles to rise.
            # Smaller ratio: higher fps but the video looks less smooth.
            # The ratios below were tuned empirically.
            if fps < 6:
                Global.frame_delay = (1 / fps) * 0.75
            elif fps < 20:
                Global.frame_delay = (1 / fps) * 0.5
            elif fps < 30:
                Global.frame_delay = (1 / fps) * 0.25
            else:
                Global.frame_delay = 0

            # Display the resulting image
            cv2.imshow('Video', write_frame_list[prev_id(Global.write_num, worker_num)])

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            Global.is_exit = True
            break

        time.sleep(0.01)

    # Quit
    cv2.destroyAllWindows()

###############################the Code #######################################

Any Clue?

@kumarsatyamdhiman kumarsatyamdhiman changed the title Multiprocessing with KNN Multiprocessing with KNN displaying no output Sep 24, 2019
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant