<!DOCTYPE html>
<html>
<head>
<title>OpenCV Face Detection example</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<style>
body, html {
padding: 0;
margin: 0;
width: 100%;
height: 100%;
overflow: hidden;
-webkit-overflow-scrolling: touch;
-webkit-user-select: none;
user-select: none;
}
#target {
width: 100%;
height: 100%;
position: absolute;
}
#video_canvas {
transform: translate(-50%, 0);
opacity:0.5;
position:absolute;
top: 0;
left: 0;
border:1px solid green
}
.text-box {
position: absolute;
top: 5%;
left: 50%;
color: white;
background: rgba(27,55,55,0.75);
outline: 1px solid rgba(127,255,255,0.75);
border: 0px;
padding: 5px 10px;
transform: translate(-50%, 0%);
font-size: 0.8em;
}
.common-message {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
font-size: 10px;
}
img.crosshair {
position: absolute;
top: 50%;
left: 50%;
margin-left: -32px;
margin-top: -32px;
}
</style>
<link rel="stylesheet" href="../common.css"/>
<script src="../libs/three.js"></script>
<script src="../libs/stats.js"></script>
<!--
<script type="module" src="../../polyfill/XRPolyfill.js"></script>
<script nomodule src="../../dist/webxr-polyfill.js"></script>
-->
<script src="../../dist/webxr-polyfill.js"></script>
<script src="../common.js"></script>
</head>
<body>
<!-- canvas overlay where detected face rectangles (and, for debugging, the
video frame) are drawn; it is sized from JS in adjustRenderCanvasSize(),
since canvas width/height attributes take pixel counts, not percentages -->
<canvas id="video_canvas"></canvas>
<div id="target" />
<div onclick="hideMe(this)" id="description">
<h2>OpenCV Face Detection Demo</h2>
<h5>(click to dismiss)</h5>
<p>Use OpenCV to find faces and draw boxes around them in 2D.</p>
</div>
<script>
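// This example runs OpenCV face detection in a Web Worker: camera frames
// from the WebXR video stream are sent to worker.js, which returns face
// rectangles that are drawn on a 2D canvas overlaid on the camera view.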
var cvStatusTxt = "";
// set up for collecting different stats
var beginTime = ( performance || Date ).now(), prevTime = beginTime, frames = 0;
// set up some stats, including a new pane for CV fps
var stats = new Stats();
stats.domElement.style.cssText = 'position:fixed;top:2%;right:2%;cursor:pointer;opacity:0.9;z-index:10000';
var cvPanel = stats.addPanel( new Stats.Panel( 'CV fps', '#ff8', '#221' ) );
stats.showPanel( 2 ); // 0: fps, 1: ms, 2: mb, 3+: custom
// a method to update a new panel for displaying CV FPS
var updateCVFPS = function () {
frames ++;
var time = ( performance || Date ).now();
if ( time >= prevTime + 1000 ) {
cvPanel.update( ( frames * 1000 ) / ( time - prevTime ), 100 );
prevTime = time;
frames = 0;
}
beginTime = time;
}
document.body.appendChild( stats.dom );
// keep track of what we think the view size is so we can adjust the
// overlay used to render the 2D detection results
var viewWidth = 0;
var viewHeight = 0;
// various variables to hold values for statistics collected from the worker
var cvStartTime = 0;
var cvAfterMatTime = 0;
var cvAfterResizeTime = 0;
var cvEndTime = 0;
var cvMatTime = 0;
var cvFaceTime = 0;
var cvResizeTime = 0;
var cvIdleTime = 0;
// has openCV loaded?
var openCVready = false;
// for debugging, show the image we did the CV on
var showCVImage = false;
var cvImageBuff = null;
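// offscreen canvas used to turn raw frame bytes into an ImageData before
// drawing the debug image onto the overlay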
var tempVideoCanvas = document.createElement('canvas');
var tempVideoCanvasCtx = tempVideoCanvas.getContext('2d');
// a 2D buffer to show the 2D boxes in, and the video in
var cvImageDiv = document.getElementById("video_canvas");
var cvImageCtx = cvImageDiv.getContext('2d');
class ARAnchorExample extends XRExampleBase {
constructor(domElement){
super(domElement, false, true, true)
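// the boolean flags configure XRExampleBase; see ../common.js for what
// each position controls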
this.textBox = document.createElement('span')
this.textBox.setAttribute('class', 'text-box')
this.textBox.innerText = '0.0'
this.faceRects = [];
this.eyeRects = [];
this.lightEstimate = 0;
this.el.appendChild(this.textBox)
// some temp variables
this.rotation = -1;
this.triggerResize = true;
// try to notice all resizes of the screen
window.addEventListener('resize', () => {
console.log("resize, trigger adjust canvas size")
this.triggerResize = true;
})
// new worker
this.worker = new Worker ("worker.js")
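// the worker drives the pipeline with typed messages: cvStart /
// cvAfterMat / cvAfterResize / cvFrame mark the stages of processing one
// frame (each carries a timestamp for the timing stats), cvReady fires
// once OpenCV.js has loaded, and cvStatus carries occasional status text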
this.worker.onmessage = (ev) => {
switch (ev.data.type) {
case "cvFrame":
// finished with a frame!
var videoFrame = XRVideoFrame.createFromMessage(ev)
// get the face rectangles
this.faceRects = ev.data.faceRects;
// timing
cvEndTime = ev.data.time;
cvFaceTime = cvEndTime - cvAfterResizeTime;
// see if the video frame sizes have changed, since
// we'll need to adjust the 2D overlay
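// cameraOrientation is the rotation of the camera image relative to the
// display, in degrees (e.g. 0, 90 or -90)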
var rotation = videoFrame.camera.cameraOrientation;
var buffer = videoFrame.buffer(0)
var width = buffer.size.width
var height = buffer.size.height
if (this.triggerResize || this.rotation != rotation) {
this.triggerResize = false;
this.rotation = rotation;
this.adjustRenderCanvasSize(rotation, width, height)
}
// are we showing the image (for debugging) or just the rectangles?
if (showCVImage) {
this.showVideoFrame(videoFrame)
} else {
cvImageCtx.clearRect(0, 0, cvImageDiv.width, cvImageDiv.height);
}
// show the rectangles
for (let i = 0; i < this.faceRects.length; i++) {
let rect = this.faceRects[i];
cvImageCtx.strokeRect(rect.x, rect.y, rect.width, rect.height);
}
// update CV fps, and release the image
updateCVFPS();
videoFrame.release();
break;
case "cvStart":
// request the next frame when the worker starts working on the current
// one, to pipeline a bit
this.requestVideoFrame();
// has there been a delay since the last frame was finished processing?
// this shouldn't happen, but is a sign that the background tasks are
// being throttled
cvStartTime = ev.data.time;
if (cvEndTime > 0) {
cvIdleTime = cvStartTime - cvEndTime;
}
break;
case "cvAfterMat":
cvAfterMatTime = ev.data.time;
cvMatTime = cvAfterMatTime - cvStartTime
break;
case "cvAfterResize":
cvAfterResizeTime = ev.data.time;
cvResizeTime = cvAfterResizeTime - cvAfterMatTime
break;
case "cvReady":
// opencv is loaded and ready!
console.log('OpenCV.js is ready');
openCVready = true
break;
case "cvStatus":
// opencv sends status messages sometimes
cvStatusTxt = ev.data.msg;
break;
}
}
this.worker.addEventListener('error', (e) => {
console.log("worker error:" + e)
})
}
// new session, tell it about our worker
newSession() {
this.setVideoWorker(this.worker);
}
// when the window or video frame resizes, we need to resize and reposition
// the 2D overlay
adjustRenderCanvasSize (rotation, width, height) {
var cameraAspect;
console.log("adjustRenderCanvasSize: " + rotation + " degrees, " + width + " by " + height)
tempVideoCanvas.width = width;
tempVideoCanvas.height = height;
if(rotation == 90 || rotation == -90) {
cameraAspect = height / width;
cvImageDiv.width = height
cvImageDiv.height = width
} else {
cameraAspect = width / height;
cvImageDiv.width = width
cvImageDiv.height = height
}
// reposition to DIV
var windowWidth = this.session.baseLayer.framebufferWidth;
var windowHeight = this.session.baseLayer.framebufferHeight;
var windowAspect = windowWidth / windowHeight;
var translateX = 0;
var translateY = 0;
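// "cover" fit: scale the overlay to fill the window while preserving the
// camera aspect ratio, then center the overflowing dimension by translating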
if (cameraAspect > windowAspect) {
windowWidth = windowHeight * cameraAspect;
translateX = -(windowWidth - this.session.baseLayer.framebufferWidth)/2;
} else {
windowHeight = windowWidth / cameraAspect;
translateY = -(windowHeight - this.session.baseLayer.framebufferHeight)/2;
}
cvImageDiv.style.width = windowWidth.toFixed(2) + 'px'
cvImageDiv.style.height = windowHeight.toFixed(2) + 'px'
cvImageDiv.style.transform = "translate(" + translateX.toFixed(2) + "px, "+ translateY.toFixed(2) + "px)"
}
// Called during construction
initializeScene(){
// Add a box at the scene origin
let box = new THREE.Mesh(
new THREE.BoxBufferGeometry(0.1, 0.1, 0.1),
new THREE.MeshPhongMaterial({ color: '#DDFFDD' })
)
box.position.set(0, 0, 0)
var axesHelper = AxesHelper( 0.2 );
this.floorGroup.add( axesHelper );
this.floorGroup.add(box)
this.scene.add(new THREE.AmbientLight('#FFF', 0.2))
let directionalLight = new THREE.DirectionalLight('#FFF', 0.6)
directionalLight.position.set(0, 10, 0)
this.scene.add(directionalLight)
}
updateScene(frame){
this.lightEstimate = frame.lightEstimate || 0;
stats.update()
var txt = "<center>"
txt += "ARKit Light Estimate: " + this.lightEstimate.toFixed(2);
txt += "<br>" ;
if (cvStatusTxt.length > 0) {
txt += "OpenCV: " + cvStatusTxt + "<br>"
}
if (openCVready) {
txt += "Looking for faces:<br>"
if (this.faceRects.length > 0) {
txt += "found " + this.faceRects.length.toString() + " faces and/or eyes"
} else {
txt += "NO FACE"
}
txt += "<br><br>idle / init / resize / detect:<br> "
txt += cvIdleTime.toFixed(2)
txt += " " + (cvMatTime).toFixed(2)
txt += " " + (cvResizeTime).toFixed(2)
txt += " " + (cvFaceTime).toFixed(2)
} else {
txt += "(Initializing OpenCV)"
}
txt += "</center>"
this.messageText = txt;
if (this.messageText != this.textBox.innerHTML) {
this.textBox.innerHTML = this.messageText;
}
// do another check on frame size, to make sure we don't miss one
var viewport = frame.views[0].getViewport(this.session.baseLayer);
if (viewport.width != viewWidth || viewport.height != viewHeight) {
viewHeight = viewport.height;
viewWidth = viewport.width;
console.log("noticed view size is different than saved view size, trigger adjust canvas size")
this.triggerResize = true;
}
}
// show the video frame for debugging
showVideoFrame(videoFrame) {
var camera = videoFrame.camera
var buffer = videoFrame.buffer(0)
// this buffer is created in the worker, and if the worker
// doesn't access the frame, it won't be created. So don't
// display the buffer if there is nothing to display yet!
if (!(buffer._buffer instanceof ArrayBuffer)) {
return;
}
// each message delivers a new ArrayBuffer, so a fresh byte view over it
// has to be created for every frame
cvImageBuff = new Uint8ClampedArray(buffer._buffer);
var data = cvImageBuff
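// only the luma (Y) plane of a YUV420P frame is converted, so the debug
// image is grayscale: each luma byte is replicated into R, G and B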
switch (videoFrame.pixelFormat) {
case XRVideoFrame.IMAGEFORMAT_YUV420P:
// ImageData below requires exactly width * height * 4 bytes
var rowExtra = buffer.size.bytesPerRow - buffer.size.bytesPerPixel * buffer.size.width;
var newData = new Uint8ClampedArray(buffer.size.width * buffer.size.height * 4);
var j=0;
var i=0;
for (var r = 0; r < buffer.size.height; r++ ) {
for (var c = 0; c < buffer.size.width; c++) {
newData[j++] = data[i];
newData[j++] = data[i];
newData[j++] = data[i];
newData[j++] = 255;
i++;
}
i += rowExtra;
}
data = newData;
break;
}
var imgData = new ImageData(data, buffer.size.width, buffer.size.height);
tempVideoCanvasCtx.putImageData(imgData,0,0);
var width = buffer.size.width
var height = buffer.size.height
cvImageCtx.clearRect(0, 0, width, height);
if (camera.cameraOrientation == 90 || camera.cameraOrientation == -90) {
cvImageCtx.translate(height/2, width/2);
} else {
cvImageCtx.translate(width/2, height/2);
}
// rotate by negative, since the cameraOrientation is the orientation of the
// camera relative to view and we want to undo that
cvImageCtx.rotate(-camera.cameraOrientation * Math.PI/180);
cvImageCtx.drawImage(tempVideoCanvas, -width/2, -height/2);
cvImageCtx.resetTransform()
}
}
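// global hook for when opencv.js is loaded on the main thread; apparently
// unused in this example, since OpenCV runs in the worker and signals
// readiness with a "cvReady" message instead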
function opencvIsReady() {
console.log('OpenCV.js is ready');
openCVready = true
}
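// build colored line segments marking the X (red), Y (green) and Z (blue)
// axes, similar to THREE.AxesHelper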
function AxesHelper( size ) {
size = size || 1;
var vertices = [
0, 0, 0, size, 0, 0,
0, 0, 0, 0, size, 0,
0, 0, 0, 0, 0, size
];
var colors = [
1, 0, 0, 1, 0.6, 0,
0, 1, 0, 0.6, 1, 0,
0, 0, 1, 0, 0.6, 1
];
var geometry = new THREE.BufferGeometry();
geometry.addAttribute( 'position', new THREE.Float32BufferAttribute( vertices, 3 ) );
geometry.addAttribute( 'color', new THREE.Float32BufferAttribute( colors, 3 ) );
var material = new THREE.LineBasicMaterial( { vertexColors: THREE.VertexColors } );
return new THREE.LineSegments(geometry, material);
}
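// construct the app once the page has loaded, after a short delay,
// presumably to give the polyfill time to set up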
window.addEventListener('DOMContentLoaded', () => {
setTimeout(() => {
try {
window.pageApp = new ARAnchorExample(document.getElementById('target'))
} catch(e) {
console.error('page error', e)
}
}, 1000)
})
</script>
</body>
</html>