Port of the lookAtRH function from GLM to TensorFlow.
PiperOrigin-RevId: 269161158
julienvalentin authored and Copybara-Service committed Sep 15, 2019
1 parent 2796ce9 commit 48af71a
Showing 3 changed files with 135 additions and 1 deletion.
1 change: 1 addition & 0 deletions tensorflow_graphics/rendering/opengl/BUILD
@@ -41,6 +41,7 @@ py_library(
# google internal rule 1
deps = [
# google internal package dependency 1,
"//tensorflow_graphics/math:vector",
"//tensorflow_graphics/util:asserts",
"//tensorflow_graphics/util:export_api",
"//tensorflow_graphics/util:shape",
67 changes: 66 additions & 1 deletion tensorflow_graphics/rendering/opengl/math.py
@@ -20,6 +20,7 @@
import math
import tensorflow as tf

from tensorflow_graphics.math import vector
from tensorflow_graphics.util import asserts
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
@@ -36,7 +37,7 @@ def perspective_right_handed(vertical_field_of_view,
In the following, A1 to An are optional batch dimensions.
Args:
- vertical_field_of_view: A tensor of shape `[A1, ..., An]`, where the last
+ vertical_field_of_view: A tensor of shape `[A1, ..., An, C]`, where the last
dimension represents the vertical field of view of the frustum expressed
in radians. Note that values for `vertical_field_of_view` must be in the
range (0,pi).
@@ -96,7 +97,71 @@
(far + near) / near_minus_far, 2.0 * far * near / near_minus_far),
axis=-1)
w = tf.stack((zero, zero, -one, zero), axis=-1)
return tf.stack((x, y, z, w), axis=-2)
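For reference, the `z` and `w` rows stacked above are the last two rows of a right-handed perspective projection matrix. A sketch of the full matrix, assuming the rows hidden above this hunk follow GLM's perspectiveRH convention, with theta = vertical_field_of_view, a = aspect ratio, n = near, f = far (so that near_minus_far = n - f):

```latex
% Right-handed perspective projection; the last two rows correspond to the
% z and w stacks in the hunk above.
P =
\begin{pmatrix}
  \frac{1}{a \tan(\theta/2)} & 0 & 0 & 0 \\
  0 & \frac{1}{\tan(\theta/2)} & 0 & 0 \\
  0 & 0 & \frac{f + n}{n - f} & \frac{2 f n}{n - f} \\
  0 & 0 & -1 & 0
\end{pmatrix}
```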


def look_at_right_handed(camera_position, look_at, up_vector, name=None):
"""Builds a right handed look at view matrix.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
camera_position: A tensor of shape `[A1, ..., An, 3]`, where the last
dimension represents the 3D position of the camera.
look_at: A tensor of shape `[A1, ..., An, 3]`, with the last dimension
storing the position the camera is looking at.
up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
defines the up vector of the camera.
name: A name for this op. Defaults to 'look_at_right_handed'.
Raises:
ValueError: if not all the inputs are of the same shape, or if any input
is of an unsupported shape.
Returns:
A tensor of shape `[A1, ..., An, 4, 4]`, containing right handed look at
matrices.
"""
with tf.compat.v1.name_scope(name, "look_at_right_handed",
[camera_position, look_at, up_vector]):
camera_position = tf.convert_to_tensor(value=camera_position)
look_at = tf.convert_to_tensor(value=look_at)
up_vector = tf.convert_to_tensor(value=up_vector)

shape.check_static(
tensor=camera_position,
tensor_name="camera_position",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=look_at, tensor_name="look_at", has_dim_equals=(-1, 3))
shape.check_static(
tensor=up_vector, tensor_name="up_vector", has_dim_equals=(-1, 3))
shape.compare_batch_dimensions(
tensors=(camera_position, look_at, up_vector),
last_axes=-2,
tensor_names=("camera_position", "look_at", "up_vector"),
broadcast_compatible=False)

z_axis = tf.linalg.l2_normalize(look_at - camera_position, axis=-1)
horizontal_axis = tf.linalg.l2_normalize(
vector.cross(z_axis, up_vector), axis=-1)
vertical_axis = vector.cross(horizontal_axis, z_axis)

batch_shape = tf.shape(input=horizontal_axis)[:-1]
zeros = tf.zeros(
shape=tf.concat((batch_shape, (3,)), axis=-1),
dtype=horizontal_axis.dtype)
one = tf.ones(
shape=tf.concat((batch_shape, (1,)), axis=-1),
dtype=horizontal_axis.dtype)
x = tf.concat(
(horizontal_axis, -vector.dot(horizontal_axis, camera_position)),
axis=-1)
y = tf.concat((vertical_axis, -vector.dot(vertical_axis, camera_position)),
axis=-1)
z = tf.concat((-z_axis, vector.dot(z_axis, camera_position)), axis=-1)
w = tf.concat((zeros, one), axis=-1)
return tf.stack((x, y, z, w), axis=-2)
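In GLM terms this is exactly lookAtRH: writing c for camera_position, f for z_axis, s for horizontal_axis and u for vertical_axis, the rows stacked above form the view matrix

```latex
% f, s, u are the normalized forward, side and up axes computed above;
% c is the camera position.
f = \frac{\text{look\_at} - c}{\lVert \text{look\_at} - c \rVert}, \qquad
s = \frac{f \times \text{up\_vector}}{\lVert f \times \text{up\_vector} \rVert}, \qquad
u = s \times f,

V =
\begin{pmatrix}
   s_x &  s_y &  s_z & -s \cdot c \\
   u_x &  u_y &  u_z & -u \cdot c \\
  -f_x & -f_y & -f_z &  f \cdot c \\
   0   &  0   &  0   &  1
\end{pmatrix}
```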


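A minimal usage sketch of the new op, assuming tensorflow_graphics is installed and that the module added above is importable as shown; the alias `glm` mirrors the one used in the tests below:

```python
import tensorflow as tf

from tensorflow_graphics.rendering.opengl import math as glm

# A camera at the origin looking down the +z axis, with +y as the up vector.
camera_position = tf.constant((0.0, 0.0, 0.0))
look_at = tf.constant((0.0, 0.0, 1.0))
up_vector = tf.constant((0.0, 1.0, 0.0))

# Returns a [4, 4] view matrix; with leading batch dimensions on the inputs
# the result has shape [A1, ..., An, 4, 4]. For this preset the matrix is
# diag(-1, 1, -1, 1), matching the first case of
# test_look_at_right_handed_preset below.
view_matrix = glm.look_at_right_handed(camera_position, look_at, up_vector)
print(view_matrix)
```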
68 changes: 68 additions & 0 deletions tensorflow_graphics/rendering/opengl/tests/math_test.py
Expand Up @@ -146,6 +146,74 @@ def test_perspective_right_handed_cross_jacobian_random(self):
self.assert_jacobian_is_correct(near_tensor, near_init, y)
self.assert_jacobian_is_correct(far_tensor, far_init, y)

def test_look_at_right_handed_preset(self):
"""Tests that look_at_right_handed generates expected results.."""
camera_position = ((0.0, 0.0, 0.0), (0.1, 0.2, 0.3))
look_at = ((0.0, 0.0, 1.0), (0.4, 0.5, 0.6))
up_vector = ((0.0, 1.0, 0.0), (0.7, 0.8, 0.9))

pred = glm.look_at_right_handed(camera_position, look_at, up_vector)
gt = (((-1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0),
(0.0, 0.0, 0.0, 1.0)),
((4.08248186e-01, -8.16496551e-01, 4.08248395e-01, -2.98023224e-08),
(-7.07106888e-01, 1.19209290e-07, 7.07106769e-01, -1.41421378e-01),
(-5.77350318e-01, -5.77350318e-01, -5.77350318e-01,
3.46410215e-01), (0.0, 0.0, 0.0, 1.0)))
self.assertAllClose(pred, gt)

@parameterized.parameters(
((3,), (3,), (3,)),
((None, 3), (None, 3), (None, 3)),
((None, 2, 3), (None, 2, 3), (None, 2, 3)),
)
def test_look_at_right_handed_exception_not_raised(self, *shapes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(glm.look_at_right_handed, shapes)

@parameterized.parameters(
("must have exactly 3 dimensions in axis -1", (2,), (3,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (2,), (3,)),
("must have exactly 3 dimensions in axis -1", (3,), (3,), (1,)),
("Not all batch dimensions are identical", (3,), (3, 3), (3, 3)),
)
def test_look_at_right_handed_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(glm.look_at_right_handed, error_msg, shapes)

def test_look_at_right_handed_jacobian_preset(self):
"""Tests the Jacobian of look_at_right_handed."""
camera_position_init = np.array(((0.0, 0.0, 0.0), (0.1, 0.2, 0.3)))
look_at_init = np.array(((0.0, 0.0, 1.0), (0.4, 0.5, 0.6)))
up_vector_init = np.array(((0.0, 1.0, 0.0), (0.7, 0.8, 0.9)))
camera_position_tensor = tf.convert_to_tensor(value=camera_position_init)
look_at_tensor = tf.convert_to_tensor(value=look_at_init)
up_vector_tensor = tf.convert_to_tensor(value=up_vector_init)
y = glm.look_at_right_handed(camera_position_tensor, look_at_tensor,
up_vector_tensor)

self.assert_jacobian_is_correct(camera_position_tensor,
camera_position_init, y)
self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y)
self.assert_jacobian_is_correct(up_vector_tensor, up_vector_init, y)

def test_look_at_right_handed_jacobian_random(self):
"""Tests the Jacobian of look_at_right_handed."""
tensor_size = np.random.randint(1, 3)
tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist()
camera_position_init = np.random.uniform(size=tensor_shape + [3])
look_at_init = np.random.uniform(size=tensor_shape + [3])
up_vector_init = np.random.uniform(size=tensor_shape + [3])
camera_position_tensor = tf.convert_to_tensor(value=camera_position_init)
look_at_tensor = tf.convert_to_tensor(value=look_at_init)
up_vector_tensor = tf.convert_to_tensor(value=up_vector_init)
y = glm.look_at_right_handed(camera_position_tensor, look_at_tensor,
up_vector_tensor)

self.assert_jacobian_is_correct(camera_position_tensor,
camera_position_init, y)
self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y)
self.assert_jacobian_is_correct(up_vector_tensor, up_vector_init, y)


if __name__ == "__main__":
test_case.main()
