
Added a WebWorker example.

Marcel Klammer committed Nov 15, 2019
1 parent a13c6c4 commit eb24439fbf2b92429485a581225c4bcde0440558
Showing with 359 additions and 0 deletions.
  1. +51 −0 js/worker/brfv5__init__worker.js
  2. +124 −0 js/worker/setup__worker.js
  3. +184 −0 minimal__webworker__track_one_face.html
@@ -0,0 +1,51 @@
// Set the BRFv5 import and library name here.
// Also set your own appId for reference.

importScripts('../brfv5/brfv5_js_tk141119_v5.1.0_trial_no_modules.js')

const _libraryName = 'brfv5_js_tk141119_v5.1.0_trial.brfv5'
const _appId = 'brfv5.browser.worker' // (mandatory): 8 to 64 characters, a-z . 0-9 allowed

const brfv5 = {}

let _brfv5Manager = null
let _brfv5Config = null

// numChunksToLoad: can be anything from 4 to 8.
const loadBRFv5Model = (modelName, numChunksToLoad, pathToModels = '', appId = null, onProgress = null) => {

if(!modelName) { throw new Error('Please provide a modelName.') }

return new Promise((resolve, reject) => {

if(_brfv5Manager && _brfv5Config) {

resolve({ brfv5Manager: _brfv5Manager, brfv5Config: _brfv5Config })

} else {

try {

brfv5.appId = appId ? appId : _appId
brfv5.binaryLocation = pathToModels + _libraryName
brfv5.modelLocation = pathToModels + modelName + '_c'
brfv5.modelChunks = numChunksToLoad // 4, 6, 8
brfv5.binaryProgress = onProgress
brfv5.binaryError = (e) => { reject(e) }
brfv5.onInit = (brfv5Manager, brfv5Config) => {

_brfv5Manager = brfv5Manager
_brfv5Config = brfv5Config

resolve({ brfv5Manager: _brfv5Manager, brfv5Config: _brfv5Config })
}

brfv5Module(brfv5)

} catch(e) {

reject(e)
}
}
})
}
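
// A usage sketch (illustrative: the model name, chunk count and path mirror
// the values used by setup__worker.js below; the appId here is made up):
//
// loadBRFv5Model('68l', 8, '../brfv5/models/', 'my.own.app.id',
//   (progress) => { console.log(progress) })
//   .then(({ brfv5Manager, brfv5Config }) => { /* configure and start tracking */ })
//   .catch((e) => { console.error('BRFv5 failed: ', e) })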
@@ -0,0 +1,124 @@
importScripts('./brfv5__init__worker.js')

let _width = 0 // will be set by main thread
let _height = 0

const sendTrackFaces = function() { self.postMessage("trackFaces"); }

self.addEventListener('message', function(e) {

// The main thread sends either the canvas size (a Uint32Array of length 2)
// or a frame of RGBA pixel data. Viewed as an Int32Array (4 bytes per element),
// the pixel buffer has exactly _width * _height elements, so the two message
// types can be told apart by length alone.
const dataBuffer = new Int32Array(e.data)

if(dataBuffer.length === 2) {

setSize(dataBuffer[0], dataBuffer[1])

} else if(_width > 0 && dataBuffer.length === _width * _height) {

_brfv5Manager.update({ data: new Uint8ClampedArray(e.data), width: _width, height: _height })

const faces = _brfv5Manager.getFaces();

if(faces.length > 0) {

const face = faces[0]
const vertices = new Float32Array(face.vertices.length);

for(let k = 0; k < face.vertices.length; k++) {

vertices[k] = face.vertices[k];
}

self.postMessage(vertices.buffer, [vertices.buffer]);
}
}

}, false);

const setSize = (width, height) => {

_width = width
_height = height

configureTracking()
}

loadBRFv5Model('68l', 8, '../brfv5/models/', _appId,
(progress) => { console.log(progress) }).then(({ brfv5Manager, brfv5Config }) => {

console.log('loadBRFv5Model: done')

_brfv5Manager = brfv5Manager
_brfv5Config = brfv5Config

configureTracking()

}).catch((e) => { console.error('BRFv5 failed: ', e) })

const configureTracking = () => {

if(_brfv5Config !== null && _width > 0) {

// Camera stream and BRFv5 are ready. Now configure. The internal defaults
// are set for a 640x480 resolution, so for that input size the following
// wouldn't be strictly necessary.

const brfv5Config = _brfv5Config
const imageWidth = _width
const imageHeight = _height

const inputSize = imageWidth > imageHeight ? imageHeight : imageWidth

// Setup image data dimensions

brfv5Config.imageConfig.inputWidth = imageWidth
brfv5Config.imageConfig.inputHeight = imageHeight

const sizeFactor = inputSize / 480.0
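
// Example: a 1280x720 camera stream gives inputSize 720 and
// sizeFactor 720 / 480 = 1.5, so minFaceSize becomes 216 and maxFaceSize 720.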

// Set face detection region of interest and parameters scaled to the image base size.

brfv5Config.faceDetectionConfig.regionOfInterest.setTo(0, 0, imageWidth, imageHeight)

brfv5Config.faceDetectionConfig.minFaceSize = 144 * sizeFactor
brfv5Config.faceDetectionConfig.maxFaceSize = 480 * sizeFactor

if(imageWidth < imageHeight) {

// Portrait mode: probably smartphone, faces tend to be closer to the camera, processing time is an issue,
// so save a bit of time and increase minFaceSize.

brfv5Config.faceDetectionConfig.minFaceSize = 240 * sizeFactor
}

// Set face tracking region of interest and parameters scaled to the image base size.

brfv5Config.faceTrackingConfig.regionOfInterest.setTo(0, 0, imageWidth, imageHeight)

brfv5Config.faceTrackingConfig.minFaceScaleStart = 50.0 * sizeFactor
brfv5Config.faceTrackingConfig.maxFaceScaleStart = 320.0 * sizeFactor

brfv5Config.faceTrackingConfig.minFaceScaleReset = 35.0 * sizeFactor
brfv5Config.faceTrackingConfig.maxFaceScaleReset = 420.0 * sizeFactor

brfv5Config.faceTrackingConfig.confidenceThresholdReset = 0.001

brfv5Config.faceTrackingConfig.enableStabilizer = true

brfv5Config.faceTrackingConfig.maxRotationXReset = 35.0
brfv5Config.faceTrackingConfig.maxRotationYReset = 45.0

brfv5Config.faceTrackingConfig.numTrackingPasses = 3
brfv5Config.faceTrackingConfig.enableFreeRotation = true

// With free rotation enabled, 999.0 effectively disables the reset on Z rotation.
brfv5Config.faceTrackingConfig.maxRotationZReset = 999.0

brfv5Config.faceTrackingConfig.numFacesToTrack = 1
brfv5Config.enableFaceTracking = true

console.log('configureTracking:', _brfv5Config)

_brfv5Manager.configure(_brfv5Config)

sendTrackFaces();
}
}
@@ -0,0 +1,184 @@
<!DOCTYPE html>
<html lang="en">
<head>

<meta charset="utf-8">
<title>Beyond Reality Face SDK - BRFv5 - Face Tracking for Browser/JavaScript - Minimal WebWorker Example (no modules)</title>

<style>
html, body { width: 100%; height: 100%; background-color: #ffffff; margin: 0; padding: 0; overflow: hidden; }
canvas {position: absolute; }
</style>
</head>

<body>

<!--
WebWorker example.
The UI side includes the webcam video as well as two
canvases, one for the image data and one for drawing the results.
In this main thread we activate the camera and draw the stream to the imageData canvas.
The pixel data is sent to the worker for analysis.
The UI side waits for the results of the last sent pixel data before sending new data.
The worker will send the results once it's finished.
The webcam image will be slightly out of sync with the results, of course.
-->
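
<!--
A sketch of the message flow between this page and the worker, as implemented
in the script below:

  main thread                             worker
  postMessage(size buffer)        ----->  setSize(w, h), configure tracking
                                  <-----  postMessage("trackFaces")
  postMessage(RGBA pixel buffer)  ----->  update(), getFaces()
                                  <-----  postMessage(vertices buffer)
  drawVertices(), send next frame ...     one frame in flight at a time
-->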

<video id="_webcam" style="display: none;" playsinline></video>
<canvas id="_imageData"></canvas>
<canvas id="_resultData"></canvas>

<script>

if(!window.Worker) { throw new Error("No worker support.") }

const worker = new Worker("./js/worker/setup__worker.js")

worker.addEventListener("error", e => console.error(e), false)

const sendSize = function() {

const sizeData = new Uint32Array(2)
sizeData.set([_width, _height])

worker.postMessage(sizeData.buffer, [sizeData.buffer])
}
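
// Note: sendSize lists the buffer as a transferable, which moves it to the
// worker instead of copying it; sizeData is detached (unusable) afterwards.
// trackFaces() below uses the same pattern for the pixel buffers.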

worker.addEventListener('message', function(e) {

// console.log("js message", e)

if(e.data === "trackFaces") {

// start signal, brf lib is ready.
trackFaces()

} else {

// Got results (vertices) for the last sent pixel data. Draw it.

_resultDataCtx.clearRect(0, 0, _width, _height);

drawVertices(_resultDataCtx, new Float32Array(e.data), '#00a0ff', 2.0)

_gotResults = true // unlock
}

}, false)

// References to the video and canvases.
const _webcam = document.getElementById('_webcam')
const _imageData = document.getElementById('_imageData')
const _resultData = document.getElementById('_resultData')

let _imageDataCtx = null
let _resultDataCtx = null

let _width = 0
let _height = 0
let _gotResults = true

const openCamera = () => {

console.log('openCamera')

return new Promise((resolve, reject) => {

window.navigator.mediaDevices.getUserMedia({ video: { width: 1280, height: 720, frameRate: 30, facingMode: 'user'} })
.then((mediaStream) => {

_webcam.srcObject = mediaStream
_webcam.play().then(() => { resolve({ width: _webcam.videoWidth, height: _webcam.videoHeight }) }).catch((e) => { reject(e) })

}).catch((e) => { reject(e) })
})
}

const trackFaces = () => {

if(!_imageData) { return }

const ctx = _imageDataCtx

ctx.setTransform(-1.0, 0, 0, 1, _width, 0) // A virtual mirror should be... mirrored
ctx.drawImage(_webcam, 0, 0, _width, _height)
ctx.setTransform(1.0, 0, 0, 1, 0, 0) // unmirror to draw the results

if(_gotResults) {

_gotResults = false

const _dataArray = new Uint8ClampedArray(_width * _height * 4)
_dataArray.set(ctx.getImageData(0, 0, _width, _height).data)

worker.postMessage(_dataArray.buffer, [_dataArray.buffer])
}

requestAnimationFrame(trackFaces)
}

openCamera().then(({ width, height }) => {

console.log('openCamera: done: ' + width + 'x' + height)

_width = width
_height = height

_imageData.width = _width
_imageData.height = _height
_imageDataCtx = _imageData.getContext("2d")

_resultData.width = _width
_resultData.height = _height
_resultDataCtx = _resultData.getContext("2d")

sendSize()

}).catch((e) => { if(e) { console.error('Camera failed: ', e) } })

const drawVertices = (ctx, vertices, color, radius) => {

ctx.fillStyle = getColor(color, 1.0) // vertices are drawn as filled dots, no stroke

const _radius = radius || 2.0

for(let i = 0; i < vertices.length; i += 2) {

ctx.beginPath()
ctx.arc(vertices[i], vertices[i + 1], _radius, 0, 2 * Math.PI)
ctx.fill()
}
}

const getColor = (color, alpha) => {

const colorStr = color + ''

if(colorStr.startsWith('rgb')) {

return color
}

if(colorStr.startsWith('#')) {

color = parseInt(colorStr.slice(1), 16)
}

// Use rgba() here: rgb() with a fourth (alpha) value isn't valid everywhere.
return 'rgba(' +
(((color >> 16) & 0xff).toString(10)) + ', ' +
(((color >> 8) & 0xff).toString(10)) + ', ' +
(((color) & 0xff).toString(10)) + ', ' + alpha + ')'
}
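
// Example: getColor('#00a0ff', 1.0) returns 'rgba(0, 160, 255, 1)'.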

</script>

</body>

</html>
