Skip to content

Commit

Permalink
coreml: improve face recognition concurrency
Browse files Browse the repository at this point in the history
  • Loading branch information
koush committed Apr 5, 2024
1 parent b676c27 commit b36783d
Showing 1 changed file with 76 additions and 45 deletions.
121 changes: 76 additions & 45 deletions plugins/coreml/src/vision/__init__.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,25 @@
from __future__ import annotations

import asyncio
from asyncio import Future
import base64
import concurrent.futures
import os
from typing import Any, Tuple
from typing import Any, Tuple, List

import coremltools as ct
import numpy as np
import Quartz
import scrypted_sdk
from Foundation import NSData, NSMakeSize
from PIL import Image, ImageOps
from scrypted_sdk import Setting, SettingValue
from scrypted_sdk import (
Setting,
SettingValue,
ObjectDetectionSession,
ObjectsDetected,
ObjectDetectionResult,
)

import Vision
from predict import Prediction, PredictPlugin, from_bounding_box
Expand Down Expand Up @@ -114,7 +121,21 @@ def detect_face_handler(request, error):
if error:
loop.call_soon_threadsafe(future.set_exception, Exception())
else:
loop.call_soon_threadsafe(future.set_result, observations)
objs = []
for o in observations:
confidence = o.confidence()
bb = o.boundingBox()
origin = bb.origin
size = bb.size

l = origin.x * input.width
t = (1 - origin.y - size.height) * input.height
w = size.width * input.width
h = size.height * input.height
prediction = Prediction(0, confidence, from_bounding_box((l, t, w, h)))
objs.append(prediction)

loop.call_soon_threadsafe(future.set_result, objs)

request = (
Vision.VNDetectFaceRectanglesRequest.alloc().initWithCompletionHandler_(
Expand All @@ -126,56 +147,66 @@ def detect_face_handler(request, error):
return future

async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
    """Run Vision face detection on a single frame.

    The Vision request setup is dispatched on the dedicated predict
    executor so the event loop is never blocked; ``predictVision``
    returns a Future that the Vision completion handler resolves with
    the already-parsed ``Prediction`` list.

    :param input: PIL image to analyze.
    :param settings: detector settings (unused by this detector).
    :param src_size: original source dimensions, forwarded to the
        detection-result builder for coordinate mapping.
    :param cvss: coordinate conversion helper, forwarded likewise.
    :return: detection result from ``create_detection_result``.
    """
    future = await asyncio.get_event_loop().run_in_executor(
        predictExecutor,
        lambda: self.predictVision(input),
    )

    # The executor call yields a Future; awaiting it gives the parsed
    # Prediction objects produced by the Vision completion handler.
    objs = await future

    ret = self.create_detection_result(objs, src_size, cvss)
    return ret

face = (
input.crop((l, t, l + w, t + h))
.copy()
.convert("RGB")
.resize((160, 160), Image.BILINEAR)
async def setEmbedding(self, d: ObjectDetectionResult, image: scrypted_sdk.Image):
    """Compute a face-recognition embedding for one detection, in place.

    Crops the detection's bounding box from the source image, resizes to
    the 160x160 RGB input the recognition model expects, runs CoreML
    inference on the predict executor, and stores the base64-encoded
    embedding on the detection dict under ``"embedding"``.

    Best effort: any failure is logged and the detection is left
    without an embedding rather than failing the detection pass.
    """
    try:
        left, top, width, height = d["boundingBox"]
        face = await image.toBuffer(
            {
                "crop": {
                    "left": left,
                    "top": top,
                    "width": width,
                    "height": height,
                },
                "resize": {
                    "width": 160,
                    "height": 160,
                },
                "format": "rgb",
            }
        )

        # toBuffer returns raw RGB bytes; wrap them as a PIL image so
        # numpy sees a (160, 160, 3) array, then reorder to CHW.
        faceImage = Image.frombuffer("RGB", (160, 160), face)
        image_tensor = np.array(faceImage).astype(np.float32).transpose([2, 0, 1])
        # Normalize to roughly [-1, 1] — presumably the facenet-style
        # preprocessing this model was trained with; TODO confirm.
        processed_tensor = (image_tensor - 127.5) / 128.0
        processed_tensor = np.expand_dims(processed_tensor, axis=0)

        # CoreML predict() is blocking; keep it off the event loop.
        out_dict = await asyncio.get_event_loop().run_in_executor(
            predictExecutor,
            lambda: self.model.predict({"x_1": processed_tensor}),
        )

        output = out_dict["var_2167"][0]
        embedding = str(base64.encodebytes(output.tobytes()))
        d["embedding"] = embedding
    except Exception:
        # Embeddings are best effort; log and continue.
        import traceback

        traceback.print_exc()

async def run_detection_image(
    self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession
) -> ObjectsDetected:
    """Detect faces, then compute embeddings for confident detections.

    Runs the base detection pass, then fans out one ``setEmbedding``
    task per detection with score >= 0.7 so the (executor-bound)
    recognition inferences overlap instead of running serially.

    :return: the base detection result, with ``"embedding"`` filled in
        on each confident detection.
    """
    ret = await super().run_detection_image(image, detection_session)

    futures: List[Future] = []

    for d in ret["detections"]:
        # Skip low-confidence boxes; they keep their detection entry
        # but get no embedding.
        if d["score"] < 0.7:
            continue

        futures.append(asyncio.ensure_future(self.setEmbedding(d, image)))

    # asyncio.wait() raises ValueError on an empty collection — guard
    # against frames where nothing cleared the confidence threshold.
    if futures:
        await asyncio.wait(futures)

    return ret

0 comments on commit b36783d

Please sign in to comment.