Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Various webxr WPT fixes #25259

Merged
merged 11 commits into from Dec 13, 2019

Some generated files are not rendered by default. Learn more.

@@ -20,7 +20,7 @@ use ipc_channel::ipc::IpcSender;
use ipc_channel::router::ROUTER;
use profile_traits::ipc;
use std::rc::Rc;
use webxr_api::{MockDeviceMsg, View, Views};
use webxr_api::{MockDeviceMsg, MockViewInit, MockViewsInit};

#[dom_struct]
pub struct FakeXRDevice {
@@ -50,58 +50,57 @@ impl FakeXRDevice {
}
}

pub fn get_views(views: &[FakeXRViewInit]) -> Fallible<Views> {
if views.len() != 2 {
return Err(Error::NotSupported);
}

let (left, right) = match (views[0].eye, views[1].eye) {
(XREye::Left, XREye::Right) => (&views[0], &views[1]),
(XREye::Right, XREye::Left) => (&views[1], &views[0]),
_ => return Err(Error::NotSupported),
};

if left.projectionMatrix.len() != 16 ||
right.projectionMatrix.len() != 16 ||
left.viewOffset.position.len() != 3 ||
right.viewOffset.position.len() != 3
{
pub fn view<Eye>(view: &FakeXRViewInit) -> Fallible<MockViewInit<Eye>> {
if view.projectionMatrix.len() != 16 || view.viewOffset.position.len() != 3 {
return Err(Error::Type("Incorrectly sized array".into()));
}

let mut proj_l = [0.; 16];
let mut proj_r = [0.; 16];
let v: Vec<_> = left.projectionMatrix.iter().map(|x| **x).collect();
proj_l.copy_from_slice(&v);
let proj_l = Transform3D::from_array(proj_l);
let v: Vec<_> = right.projectionMatrix.iter().map(|x| **x).collect();
proj_r.copy_from_slice(&v);
let proj_r = Transform3D::from_array(proj_r);
let mut proj = [0.; 16];
let v: Vec<_> = view.projectionMatrix.iter().map(|x| **x).collect();
proj.copy_from_slice(&v);
let projection = Transform3D::from_array(proj);

// spec defines offsets as origins, but mock API expects the inverse transform
let offset_l = get_origin(&left.viewOffset)?.inverse();
let offset_r = get_origin(&right.viewOffset)?.inverse();

let size_l = Size2D::new(views[0].resolution.width, views[0].resolution.height);
let size_r = Size2D::new(views[1].resolution.width, views[1].resolution.height);
let transform = get_origin(&view.viewOffset)?.inverse();

let origin_l = Point2D::new(0, 0);
let origin_r = Point2D::new(size_l.width, 0);

let viewport_l = Rect::new(origin_l, size_l);
let viewport_r = Rect::new(origin_r, size_r);

let left = View {
projection: proj_l,
transform: offset_l,
viewport: viewport_l,
let size = Size2D::new(view.resolution.width, view.resolution.height);
let origin = match view.eye {
XREye::Right => Point2D::new(size.width, 0),
_ => Point2D::new(0, 0),
};
let right = View {
projection: proj_r,
transform: offset_r,
viewport: viewport_r,
let viewport = Rect::new(origin, size);

let fov = if let Some(ref fov) = view.fieldOfView {
Some((
fov.leftDegrees.to_radians(),
fov.rightDegrees.to_radians(),
fov.upDegrees.to_radians(),
fov.downDegrees.to_radians(),
))
} else {
None
};
Ok(Views::Stereo(left, right))

Ok(MockViewInit {
projection,
transform,
viewport,
fov,
})
}
pub fn get_views(views: &[FakeXRViewInit]) -> Fallible<MockViewsInit> {
match views.len() {
1 => Ok(MockViewsInit::Mono(view(&views[0])?)),
2 => {
let (left, right) = match (views[0].eye, views[1].eye) {
(XREye::Left, XREye::Right) => (&views[0], &views[1]),
(XREye::Right, XREye::Left) => (&views[1], &views[0]),
_ => return Err(Error::NotSupported),
};
Ok(MockViewsInit::Stereo(view(left)?, view(right)?))
},
_ => Err(Error::NotSupported),
}
}

pub fn get_origin<T, U>(
@@ -134,15 +133,33 @@ impl FakeXRDeviceMethods for FakeXRDevice {
Ok(())
}

/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-setviewerorigin
fn SetViewerOrigin(
    &self,
    origin: &FakeXRRigidTransformInit,
    _emulated_position: bool,
) -> Fallible<()> {
    // Best-effort: the mock device may already have shut down, so a failed
    // send is deliberately ignored.
    let _ = self
        .sender
        .send(MockDeviceMsg::SetViewerOrigin(Some(get_origin(origin)?)));
    Ok(())
}

/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-clearviewerorigin
fn ClearViewerOrigin(&self) {
    // Clearing is expressed as setting the origin to `None`; send failures
    // are ignored (the mock device may already be gone).
    drop(self.sender.send(MockDeviceMsg::SetViewerOrigin(None)));
}

/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-clearfloororigin
fn ClearFloorOrigin(&self) {
    // Clearing is expressed as setting the floor origin to `None`; send
    // failures are ignored (the mock device may already be gone).
    drop(self.sender.send(MockDeviceMsg::SetFloorOrigin(None)));
}

/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-setfloororigin
fn SetFloorOrigin(&self, origin: &FakeXRRigidTransformInit) -> Fallible<()> {
    // Validate/convert the transform first so errors surface to the caller;
    // the send itself is best-effort and its result is ignored.
    let origin = get_origin(origin)?;
    let _ = self.sender.send(MockDeviceMsg::SetFloorOrigin(Some(origin)));
    Ok(())
}

@@ -13,8 +13,11 @@ interface FakeXRDevice {
// // behaves as if device was disconnected
// Promise<void> disconnect();

// Sets the origin of the viewer
[Throws] void setViewerOrigin(FakeXRRigidTransformInit origin, optional boolean emulatedPosition = false);
void clearViewerOrigin();

[Throws] void setFloorOrigin(FakeXRRigidTransformInit origin);
void clearFloorOrigin();

// // Simulates devices focusing and blurring sessions.
// void simulateVisibilityChange(XRVisibilityState);
@@ -40,6 +43,8 @@ dictionary FakeXRViewInit {
required FakeXRRigidTransformInit viewOffset;
// https://immersive-web.github.io/webxr/#dom-xrwebgllayer-getviewport
required FakeXRDeviceResolution resolution;

FakeXRFieldOfViewInit fieldOfView;
};

// https://immersive-web.github.io/webxr/#xrviewport
@@ -56,3 +61,10 @@ dictionary FakeXRRigidTransformInit {
required sequence<float> position;
required sequence<float> orientation;
};

// https://immersive-web.github.io/webxr-test-api/#dictdef-fakexrfieldofviewinit
// Half-angles of a mock view's field of view, in degrees.
dictionary FakeXRFieldOfViewInit {
required float upDegrees;
required float downDegrees;
required float leftDegrees;
required float rightDegrees;
};
@@ -14,6 +14,6 @@ dictionary XRRenderStateInit {
[SecureContext, Exposed=Window, Pref="dom.webxr.enabled"] interface XRRenderState {
  readonly attribute double depthNear;
  readonly attribute double depthFar;
  // Nullable: only inline sessions have an inline vertical field of view.
  readonly attribute double? inlineVerticalFieldOfView;
  readonly attribute XRWebGLLayer? baseLayer;
};
@@ -162,12 +162,12 @@ impl XRMethods for XR {
) -> Rc<Promise> {
let promise = Promise::new_in_current_compartment(&self.global(), comp);

if !ScriptThread::is_user_interacting() {
promise.reject_error(Error::Security);
return promise;
}

if mode != XRSessionMode::Inline {
if !ScriptThread::is_user_interacting() {
promise.reject_error(Error::Security);
return promise;
}

if self.pending_or_active_session() {
promise.reject_error(Error::InvalidState);
return promise;
@@ -77,7 +77,11 @@ impl XRFrameMethods for XRFrame {
return Err(Error::InvalidState);
}

let pose = reference.get_viewer_pose(&self.data);
let pose = if let Some(pose) = reference.get_viewer_pose(&self.data) {
pose
} else {
return Ok(None);
};
Ok(Some(XRViewerPose::new(&self.global(), &self.session, pose)))
}

@@ -10,10 +10,10 @@ use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::globalscope::GlobalScope;
use crate::dom::xrrigidtransform::XRRigidTransform;
use crate::dom::xrsession::{cast_transform, ApiPose, ApiRigidTransform, ApiViewerPose, XRSession};
use crate::dom::xrsession::{cast_transform, ApiPose, ApiViewerPose, XRSession};
use crate::dom::xrspace::XRSpace;
use dom_struct::dom_struct;
use euclid::{RigidTransform3D, Vector3D};
use euclid::RigidTransform3D;
use webxr_api::Frame;

#[dom_struct]
@@ -80,23 +80,22 @@ impl XRReferenceSpace {
///
/// This is equivalent to `get_pose(self).inverse() * get_pose(viewerSpace)` (in column vector notation),
/// however we specialize it to be efficient
pub fn get_viewer_pose(&self, base_pose: &Frame) -> ApiViewerPose {
let pose = self.get_unoffset_viewer_pose(base_pose);
pub fn get_viewer_pose(&self, base_pose: &Frame) -> Option<ApiViewerPose> {
let pose = self.get_unoffset_viewer_pose(base_pose)?;
// in column-vector notation,
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
// = (get_unoffset_pose(space) * offset).inverse() * get_pose(viewer_space)
// = offset.inverse() * get_unoffset_pose(space).inverse() * get_pose(viewer_space)
// = offset.inverse() * get_unoffset_viewer_pose(space)
let offset = self.offset.transform();
let inverse = offset.inverse();
inverse.pre_transform(&pose)
Some(inverse.pre_transform(&pose))
}

/// Gets pose of the viewer with respect to this space
///
/// Does not apply originOffset, use get_viewer_pose instead if you need it
pub fn get_unoffset_viewer_pose(&self, base_pose: &Frame) -> ApiViewerPose {
let viewer_pose: ApiViewerPose = cast_transform(base_pose.transform);
pub fn get_unoffset_viewer_pose(&self, base_pose: &Frame) -> Option<ApiViewerPose> {
// all math is in column-vector notation
// we use the following equation to verify correctness here:
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
@@ -105,25 +104,27 @@ impl XRReferenceSpace {
// get_viewer_pose(eye_level) = get_pose(eye_level).inverse() * get_pose(viewer_space)
// = I * viewer_pose
// = viewer_pose
let viewer_pose: ApiViewerPose = cast_transform(base_pose.transform?);

// we get viewer poses in eye-level space by default
viewer_pose
Some(viewer_pose)
},
XRReferenceSpaceType::Local_floor => {
// XXXManishearth support getting floor info from stage parameters

// get_viewer_pose(floor_level) = get_pose(floor_level).inverse() * get_pose(viewer_space)
// = Translate(-2).inverse() * viewer_pose
// = Translate(2) * viewer_pose
// = floor_to_native.inverse() * viewer_pose
// = native_to_floor * viewer_pose
let viewer_pose = base_pose.transform?;
let native_to_floor = self
.upcast::<XRSpace>()
.session()
.with_session(|s| s.floor_transform())?;

// assume approximate user height of 2 meters
let floor_to_eye: ApiRigidTransform = Vector3D::new(0., 2., 0.).into();
floor_to_eye.pre_transform(&viewer_pose)
Some(cast_transform(native_to_floor.pre_transform(&viewer_pose)))
},
XRReferenceSpaceType::Viewer => {
// This reference space follows the viewer around, so the viewer is
// always at an identity transform with respect to it
RigidTransform3D::identity()
Some(RigidTransform3D::identity())
},
_ => unimplemented!(),
}
@@ -134,34 +135,34 @@ impl XRReferenceSpace {
/// The reference origin used is common between all
/// get_pose calls for spaces from the same device, so this can be used to compare
/// with other spaces
pub fn get_pose(&self, base_pose: &Frame) -> ApiPose {
let pose = self.get_unoffset_pose(base_pose);
pub fn get_pose(&self, base_pose: &Frame) -> Option<ApiPose> {
let pose = self.get_unoffset_pose(base_pose)?;
let offset = self.offset.transform();
// pose is a transform from the unoffset space to native space,
// offset is a transform from offset space to unoffset space,
// we want a transform from unoffset space to native space,
// which is pose * offset in column vector notation
pose.pre_transform(&offset)
Some(pose.pre_transform(&offset))
}

/// Gets pose represented by this space
///
/// Does not apply originOffset, use get_viewer_pose instead if you need it
pub fn get_unoffset_pose(&self, base_pose: &Frame) -> ApiPose {
pub fn get_unoffset_pose(&self, base_pose: &Frame) -> Option<ApiPose> {
match self.ty {
XRReferenceSpaceType::Local => {
// The eye-level pose is basically whatever the headset pose was at t=0, which
// for most devices is (0, 0, 0)
RigidTransform3D::identity()
Some(RigidTransform3D::identity())
},
XRReferenceSpaceType::Local_floor => {
// XXXManishearth support getting floor info from stage parameters

// Assume approximate height of 2m
// the floor-level space is 2m below the eye-level space, which is (0, 0, 0)
Vector3D::new(0., -2., 0.).into()
let native_to_floor = self
.upcast::<XRSpace>()
.session()
.with_session(|s| s.floor_transform())?;
Some(cast_transform(native_to_floor.inverse()))
},
XRReferenceSpaceType::Viewer => cast_transform(base_pose.transform),
XRReferenceSpaceType::Viewer => base_pose.transform.map(cast_transform),
_ => unimplemented!(),
}
}
ProTip! Use n and p to navigate between commits in a pull request.
You can’t perform that action at this time.