<html>
<!-- Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<head>
<title>ARKit face tracking example</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<link rel="stylesheet" href="../common.css"/>
<script src="../libs/three/three.js"></script>
<script src="../libs/three/loaders/GLTFLoader.js"></script>
<script src="../libs/dat.gui.min.js"></script>
<script module src="../../dist/webxr.js"></script>
<style>
.dg {
margin-top:30px !important;
}
</style>
<!-- <script module src="../../dist/webxr.js"></script> -->
</head>
<body>
<div id="description">
<h2>ARKit face tracking example</h2>
<h5>(click to dismiss)</h5>
<p>This detects and tracks your face using ARKit and places a 3D model on it.
(Glasses model by <a href="https://sketchfab.com/models/5c78f100eea749c895d69fe2ed728197#">person-x</a>)</p>
</div>
<button type=button id=go-button>Go</button>
<script type=module>
// some dependencies and utilities
import * as mat4 from '../libs/gl-matrix/mat4.js'
import * as vec3 from '../libs/gl-matrix/vec3.js'
import XREngine from "../XREngine.js"
let device = null
let session = null
let eyeLevelFrameOfReference = null
let engine = null
var meshMap = new Map()
// temporary working variables
const workingMatrix = mat4.create()
const workingVec3 = vec3.create()
var savedOrigin = [0,0,0]
var savedDirection = [0,0,-1]
var ambientLight = null
var directionalLight = null
// add dat.GUI to the left HUD. It is hidden in stereo viewing, so we don't need
// to figure out how to duplicate it for each eye.
let params = {
face: 'occlusion',
ducky: false,
glasses: true
};
var gui = new dat.GUI({ autoPlace: false });
gui.add( params, 'face', { 'Occlusion Only': 'occlusion', 'Occlusion & Mesh': 'both', 'Transparent Mesh': 'transparent' } );
gui.add( params, 'ducky' );
gui.add( params, 'glasses' );
gui.domElement.id = 'gui';
gui.open();
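// Preload the ducky and glasses models. Each one is wrapped in a THREE.Group so it
// can be attached to (and detached from) the tracked face mesh node each frame,
// depending on the GUI toggles above.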
var duckyCreated = false
var glassesCreated = false;
var material = null
var wireMaterial = null
var ducky = new THREE.Group()
ducky.name = "Duck group"
var loader = new THREE.GLTFLoader().setPath( './' );
loader.load( 'DuckyMesh.glb',
function (gltf) {
let duckyNode = gltf.scene
duckyNode.position.set(0, -0.25, -0.2)
duckyNode.scale.set(3,3,3)
ducky.add(duckyNode)
duckyCreated = true
},
null, // progress callback
function(e) {
console.error('could not load ducky gltf', e)
})
loader.setPath('./glasses/')
var glasses = new THREE.Group()
glasses.name = "Glasses group"
loader.load( 'glasses.gltf',
function (gltf) {
let glassesNode = gltf.scene
glassesNode.position.set(0, 0.01, 0.06)
glassesNode.scale.set(0.0005,0.0005,0.0005)
glasses.add(glassesNode)
glassesCreated = true
},
null, // progress callback
function(e) {
console.error('could not load glasses gltf', e)
})
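// Scene setup: lights plus the two face materials. "material" is the solid face
// surface and "wireMaterial" is a wireframe overlay; their colorWrite and
// transparent flags are toggled each frame to implement the occlusion,
// transparent, and mesh display modes.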
function initializeScene() {
ambientLight = engine.addAmbientLight()
directionalLight = engine.addDirectionalLight()
// Add a box and axis at the origin of the eye-level coordinate system
// for debugging by uncommenting these lines
// engine.addBox([0, 0, 0], [0.025, 0.025, 0.025], 0x44ff44)
// engine.addAxesHelper([0,0,0], [0.2,0.2,0.2])
wireMaterial = new THREE.MeshPhongMaterial({color: '#999999', wireframe: true})
material = new THREE.MeshPhongMaterial({color: '#999900', transparent: true, opacity: 0.5})
}
// Called once per frame, before render, to give the app a chance to update this.scene
function updateScene(frame){
let worldInfo = frame.worldInformation
if(worldInfo.estimatedLight){
let ambientIntensity = worldInfo.estimatedLight.ambientIntensity
ambientLight.intensity = ambientIntensity * 0.5;
directionalLight.intensity = ambientIntensity * 0.1;
}
if(worldInfo.meshes){
meshMap.forEach(object => { object.seen = false })
worldInfo.meshes.forEach(worldMesh => {
let object = meshMap.get(worldMesh.uid);
if (object) {
handleUpdateNode(worldMesh, object)
} else if (worldMesh instanceof XRFaceMesh) {
handleNewNode(worldMesh)
object = meshMap.get(worldMesh.uid);
}
if (object) {
if (params.ducky) {
if (ducky.parent != object.faceMesh) {
object.faceMesh.add(ducky)
}
} else {
object.faceMesh.remove(ducky)
}
if (params.glasses) {
if (glasses.parent != object.faceMesh) {
object.faceMesh.add(glasses)
}
} else {
object.faceMesh.remove(glasses)
}
}
})
if (material) {
if (params.face == "occlusion") {
// only update the depth buffer, so the face occludes virtual content behind it
material.colorWrite = false;
wireMaterial.colorWrite = false;
material.transparent = false;
} else if (params.face == "transparent") {
// draw the face mesh as a semi-transparent surface
material.colorWrite = true;
wireMaterial.colorWrite = true;
material.transparent = true;
} else {
// "both": draw the face mesh and wireframe opaquely
material.colorWrite = true;
wireMaterial.colorWrite = true;
material.transparent = false;
}
}
meshMap.forEach(object => {
if (!object.seen) {
handleRemoveNode(object)
}
})
}
}
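// A face mesh that was not reported in this frame's worldInformation is gone:
// dispose of its geometry, remove its anchored node from the engine, and forget
// it so a new node is created if the face is detected again.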
function handleRemoveNode(object) {
object.geometry.dispose()
engine.removeAnchoredNode(object.faceMesh);
meshMap.delete(object.worldMesh.uid)
}
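// Copy any changed buffers from the XRFaceMesh into the corresponding three.js
// BufferAttributes. The vertex positions typically change every frame as the
// face moves; normals are optional, and UVs and indices rarely change.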
function handleUpdateNode(worldMesh, object) {
object.seen = true
// we don't need to do anything if the timestamp hasn't changed
if (worldMesh.timeStamp <= object.ts) {
return;
}
object.ts = worldMesh.timeStamp
let currentVertexIndex = 0
if (worldMesh.vertexPositionsChanged) {
let position = object.geometry.attributes.position
if (position.array.length != worldMesh.vertexPositions.length) {
console.error("position and vertex arrays are different sizes", position, worldMesh)
}
position.setArray(worldMesh.vertexPositions);
position.needsUpdate = true;
}
if (worldMesh.vertexNormalsChanged && worldMesh.vertexNormals.length > 0) {
// normals are optional
let normals = object.geometry.attributes.normal
if (normals.array.length != worldMesh.vertexNormals.length) {
console.error("normal and vertex arrays are different sizes", normals, worldMesh)
}
normals.setArray(worldMesh.vertexNormals);
normals.needsUpdate = true;
}
/// these ones probably will not change while the face is tracked, but
/// for future changes to the underlying detector, keep them here
if (worldMesh.textureCoordinatesChanged) {
let uv = object.geometry.attributes.uv
if (uv.array.length != worldMesh.textureCoordinates.length) {
console.error("uv and vertex arrays are different sizes", uv, worldMesh)
}
uv.setArray(worldMesh.textureCoordinates);
uv.needsUpdate = true;
}
if (worldMesh.triangleIndicesChanged) {
let index = object.geometry.index
if (index.array.length != worldMesh.triangleIndices.length) {
console.error("index and triangle arrays are different sizes", index, worldMesh)
}
index.setArray(worldMesh.triangleIndices);
index.needsUpdate = true;
}
}
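// First time we see a face: build a THREE.BufferGeometry from the mesh data,
// mark every attribute as dynamic so it can be updated in place, and anchor a
// group containing the solid and wireframe meshes to the face anchor.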
function handleNewNode(worldMesh) {
let geometry = new THREE.BufferGeometry()
let indices = new THREE.BufferAttribute(worldMesh.triangleIndices, 1)
indices.dynamic = true
geometry.setIndex(indices)
let verticesBufferAttribute = new THREE.BufferAttribute( worldMesh.vertexPositions, 3 )
verticesBufferAttribute.dynamic = true
geometry.addAttribute( 'position', verticesBufferAttribute );
let uvBufferAttribute = new THREE.BufferAttribute( worldMesh.textureCoordinates, 2 )
uvBufferAttribute.dynamic = true
geometry.addAttribute( 'uv', uvBufferAttribute );
if (worldMesh.vertexNormals.length > 0) {
let normalsBufferAttribute = new THREE.BufferAttribute( worldMesh.vertexNormals, 3 )
normalsBufferAttribute.dynamic = true
geometry.addAttribute( 'normal', normalsBufferAttribute );
} else {
geometry.computeVertexNormals()
}
let transparentMesh = new THREE.Group()
var tm = new THREE.Mesh(geometry, material)
tm.renderOrder = -2
transparentMesh.add(tm)
tm = new THREE.Mesh(geometry, wireMaterial)
tm.renderOrder = -2
transparentMesh.add(tm)
let faceMesh = new THREE.Group();
faceMesh.add(transparentMesh)
engine.addAnchoredNode(worldMesh, faceMesh)
meshMap.set(worldMesh.uid, {
ts: worldMesh.timeStamp,
worldMesh: worldMesh,
seen: true,
geometry: geometry,
faceMesh: faceMesh,
transparentMesh: transparentMesh,
threeMesh: transparentMesh
})
}
////////////////////////////////////////////////////
////////////////////////////////////////////////////
// BOILER PLATE. Can you feel the plates boiling?
//
// There are slight differences between examples but largely this code is similar
//
// Create the output context where the XRSession will place composited renders
const xrCanvas = document.createElement('canvas')
xrCanvas.setAttribute('class', 'xr-canvas')
const xrContext = xrCanvas.getContext('xrpresent')
if(!xrContext){
console.error('No XR context', xrCanvas)
}
// get the XR Device
navigator.xr.requestDevice().then(xrDevice => {
device = xrDevice
}).catch(err => {
console.error('Error requesting XR device', err)
})
document.getElementById('description').addEventListener('touchstart', hideMe, {capture: true})
function hideMe(event) {
event.target.style.display = 'none'
event.stopPropagation()
}
document.getElementById('go-button').addEventListener('click', handleStartSessionRequest, true)
document.getElementById('go-button').addEventListener('touchstart', handleGoButtonTouch, true)
function handleGoButtonTouch(event) {
event.stopPropagation()
}
/////////////////////
// Session startup / shutdown
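// worldSensing is requested at session creation so that frame.worldInformation
// (used in updateScene above) is populated with detected meshes and light estimates.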
function handleStartSessionRequest(ev){
if(device === null){
console.error('No xr device')
return
}
if (!session) {
device.requestSession({
outputContext: xrContext,
worldSensing: true
}).then(handleSessionStarted)
.catch(err => {
console.error('Session setup error', err)
})
document.getElementById('go-button').innerText = "End"
document.getElementById('go-button').style.display = 'none'
document.body.appendChild(gui.domElement);
} else {
session.end()
handleSessionEnded();
document.getElementById('description').style.display = 'block'
document.getElementById('go-button').style.display = "block"
document.getElementById('go-button').innerText = "Go"
document.body.removeChild(gui.domElement);
}
}
function handleSessionEnded() {
session = null
}
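// Once the session starts: enable illumination and mesh detection, create the
// WebGL context and XRWebGLLayer to render into, then build the root anchor
// and kick off the requestAnimationFrame loop.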
function handleSessionStarted(xrSession){
session = xrSession
document.body.insertBefore(xrCanvas, document.body.firstChild)
let sensingState = xrSession.updateWorldSensingState({
illuminationDetectionState : {
enabled : true
},
meshDetectionState : {
enabled : true
}
})
// Create the context where we will render our 3D scene
const canvas = document.createElement('canvas')
var glContext = canvas.getContext('webgl', {
compatibleXRDevice: device
})
if(!glContext) throw new Error('Could not create a webgl context')
// Set up the base layer
session.baseLayer = new XRWebGLLayer(session, glContext)
// Create a simple test scene and renderer
// The engine's scene is in the eye-level coordinate system
// Our custom engine class does hit testing at the end of each rAF
engine = new XREngine(canvas, glContext)
createRootNode().then(() => {
// Kick off rendering
session.requestAnimationFrame(handleAnimationFrame)
})
initializeScene()
}
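// The engine's root node is anchored at the device's starting position (with
// identity orientation) in the eye-level coordinate system, so scene content
// stays world-stable as the device moves.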
async function createRootNode() {
let headFrameOfReference = await session.requestFrameOfReference('head-model')
eyeLevelFrameOfReference = await session.requestFrameOfReference('eye-level')
// get the location of the device, and use it to create an
// anchor with the identity orientation
headFrameOfReference.getTransformTo(eyeLevelFrameOfReference, workingMatrix)
mat4.getTranslation(workingVec3, workingMatrix)
mat4.fromTranslation(workingMatrix, workingVec3)
let anchor = await session.addAnchor(workingMatrix, eyeLevelFrameOfReference)
engine.addAnchoredNode(anchor, engine.root)
return true
}
////////////////
// render loop
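// Per-frame: update the scene from this frame's world information, queue the next
// frame, then render each view using the pose in the eye-level frame of reference.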
function handleAnimationFrame(t, frame){
if(!session || session.ended) return
updateScene(frame)
session.requestAnimationFrame(handleAnimationFrame)
let pose = frame.getDevicePose(eyeLevelFrameOfReference)
if(!pose){
console.log('No pose')
return
}
engine.startFrame()
for (let view of frame.views) {
engine.preRender(
session.baseLayer.getViewport(view),
view.projectionMatrix,
pose.getViewMatrix(view)
)
engine.render()
}
}
</script>
</body>
</html>