From 321057209f84da03f3f842ed7a5a4bc053084825 Mon Sep 17 00:00:00 2001 From: julienvalentin Date: Mon, 9 Sep 2019 05:15:35 -0700 Subject: [PATCH] Port of the OpenGL transformation bringing points in model to screen space. PiperOrigin-RevId: 267975172 --- tensorflow_graphics/rendering/BUILD | 1 + tensorflow_graphics/rendering/__init__.py | 1 + tensorflow_graphics/rendering/opengl/BUILD | 68 ++ .../rendering/opengl/__init__.py | 23 + tensorflow_graphics/rendering/opengl/math.py | 492 +++++++++++ .../rendering/opengl/tests/math_test.py | 829 ++++++++++++++++++ 6 files changed, 1414 insertions(+) create mode 100644 tensorflow_graphics/rendering/opengl/BUILD create mode 100644 tensorflow_graphics/rendering/opengl/__init__.py create mode 100644 tensorflow_graphics/rendering/opengl/math.py create mode 100644 tensorflow_graphics/rendering/opengl/tests/math_test.py diff --git a/tensorflow_graphics/rendering/BUILD b/tensorflow_graphics/rendering/BUILD index 657340b82..2000ff8e6 100644 --- a/tensorflow_graphics/rendering/BUILD +++ b/tensorflow_graphics/rendering/BUILD @@ -30,6 +30,7 @@ py_library( visibility = ["//visibility:public"], deps = [ "//tensorflow_graphics/rendering/camera", + "//tensorflow_graphics/rendering/opengl", "//tensorflow_graphics/rendering/reflectance", "//tensorflow_graphics/util:export_api", ], diff --git a/tensorflow_graphics/rendering/__init__.py b/tensorflow_graphics/rendering/__init__.py index 71ceb500e..a20ba170c 100644 --- a/tensorflow_graphics/rendering/__init__.py +++ b/tensorflow_graphics/rendering/__init__.py @@ -17,6 +17,7 @@ from __future__ import print_function from tensorflow_graphics.rendering import camera +from tensorflow_graphics.rendering import opengl from tensorflow_graphics.rendering import reflectance from tensorflow_graphics.util import export_api as _export_api diff --git a/tensorflow_graphics/rendering/opengl/BUILD b/tensorflow_graphics/rendering/opengl/BUILD new file mode 100644 index 000000000..36a549eb1 --- /dev/null +++ b/tensorflow_graphics/rendering/opengl/BUILD @@ -0,0 +1,68 @@ +#Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Math functionalities for tf-graphics. 
+ +# google internal package dependency 8) +# google internal package dependency 5 + +licenses(["notice"]) # Apache 2.0 + +package(default_visibility = ["//visibility:public"]) + +py_library( + name = "opengl", + srcs = [ + "__init__.py", + ], + srcs_version = "PY2AND3", + # google internal rule 1 + visibility = ["//visibility:public"], + deps = [ + ":math", + "//tensorflow_graphics/util:export_api", + ], +) + +py_library( + name = "math", + srcs = ["math.py"], + srcs_version = "PY2AND3", + # google internal rule 1 + deps = [ + # google internal package dependency 1, + "//tensorflow_graphics/math:vector", + "//tensorflow_graphics/util:asserts", + "//tensorflow_graphics/util:export_api", + "//tensorflow_graphics/util:shape", + ], +) + +py_test( + name = "math_test", + srcs = ["tests/math_test.py"], + srcs_version = "PY2AND3", + # google internal rule 1 + # google internal rule 2 + # google internal rule 3 + # google internal rule 4 + # google internal rule 5 + # google internal rule 6 + deps = [ + ":math", + # google internal package dependency 2 + # google internal package dependency 6 + # google internal package dependency 1, + "//tensorflow_graphics/util:test_case", + ], +) diff --git a/tensorflow_graphics/rendering/opengl/__init__.py b/tensorflow_graphics/rendering/opengl/__init__.py new file mode 100644 index 000000000..2bb6d4926 --- /dev/null +++ b/tensorflow_graphics/rendering/opengl/__init__.py @@ -0,0 +1,23 @@ +#Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""OpenGL module.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorflow_graphics.rendering.opengl import math +from tensorflow_graphics.util import export_api as _export_api + +# API contains submodules of tensorflow_graphics.rendering. +__all__ = _export_api.get_modules() diff --git a/tensorflow_graphics/rendering/opengl/math.py b/tensorflow_graphics/rendering/opengl/math.py new file mode 100644 index 000000000..ff27eaa8d --- /dev/null +++ b/tensorflow_graphics/rendering/opengl/math.py @@ -0,0 +1,492 @@ +#Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""This module implements math routines used by OpenGL.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import tensorflow as tf + +from tensorflow_graphics.math import vector +from tensorflow_graphics.util import asserts +from tensorflow_graphics.util import export_api +from tensorflow_graphics.util import shape + + +def perspective_rh(vertical_field_of_view, aspect_ratio, near, far, name=None): + """Generates the matrix for a right handed perspective-view frustum. + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + vertical_field_of_view: A tensor of shape `[A1, ..., An, C]`, where the last + dimension represents the vertical field of view of the frustum. Note that + values for `vertical_field_of_view` must be in the range ]0,pi[. + aspect_ratio: A tensor of shape `[A1, ..., An, C]`, where the last dimension + stores the width over height ratio of the frustum. Note that values for + `aspect_ratio` must be non-negative. + near: A tensor of shape `[A1, ..., An, C]`, where the last dimension + captures the distance between the viewer and the near clipping plane. Note + that values for `near` must be non-negative. + far: A tensor of shape `[A1, ..., An, C]`, where the last dimension + captures the distance between the viewer and the far clipping plane. Note + that values for `far` must be non-negative. + name: A name for this op. Defaults to 'perspective_rh'. + + Raises: + InvalidArgumentError: if any input contains data not in the specified range + of valid values. + ValueError: if the all the inputs are not of the same shape. + + Returns: + A tensor of shape `[A1, ..., An, C, 4, 4]`, containing matrices of right + handed perspective-view frustum. + """ + with tf.compat.v1.name_scope( + name, "perspective_rh", + [vertical_field_of_view, aspect_ratio, near, far]): + vertical_field_of_view = tf.convert_to_tensor(value=vertical_field_of_view) + aspect_ratio = tf.convert_to_tensor(value=aspect_ratio) + near = tf.convert_to_tensor(value=near) + far = tf.convert_to_tensor(value=far) + + shape.compare_batch_dimensions( + tensors=(vertical_field_of_view, aspect_ratio, near, far), + last_axes=-1, + tensor_names=("vertical_field_of_view", "aspect_ratio", "near", "far"), + broadcast_compatible=False) + + vertical_field_of_view = asserts.assert_all_in_range( + vertical_field_of_view, 0.0, math.pi, open_bounds=True) + aspect_ratio = asserts.assert_all_above(aspect_ratio, 0.0, open_bound=True) + near = asserts.assert_all_above(near, 0.0, open_bound=True) + far = asserts.assert_all_above(far, 0.0, open_bound=True) + + tan_half_vertical_field_of_view = tf.tan(vertical_field_of_view * 0.5) + zero = tf.zeros_like(tan_half_vertical_field_of_view) + one = tf.ones_like(tan_half_vertical_field_of_view) + + x = tf.stack( + (1.0 / + (aspect_ratio * tan_half_vertical_field_of_view), zero, zero, zero), + axis=-1) + y = tf.stack((zero, 1.0 / tan_half_vertical_field_of_view, zero, zero), + axis=-1) + z = tf.stack((zero, zero, + (far + near) / (near - far), 2.0 * far * near / (near - far)), + axis=-1) + w = tf.stack((zero, zero, -one, zero), axis=-1) + return tf.stack((x, y, z, w), axis=-2) + + +def look_at_right_handed(camera_position, look_at, up_vector, name=None): + """Builds a right handed look at view matrix. + + Note: + In the following, A1 to An are optional batch dimensions. 
+ + Args: + camera_position: A tensor of shape `[A1, ..., An, 3]`, where the last + dimension represents the 3D position of the camera. + look_at: A tensor of shape `[A1, ..., An, 3]`, with the last dimension + storing the position the camera is looking at. + up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension + defines the up vector of the camera. + name: A name for this op. Defaults to 'look_at_right_handed'. + + Raises: + ValueError: if all the inputs are not of the same shape, or if any input + is of an unsupported shape. + + Returns: + A tensor of shape `[A1, ..., An, 4, 4]`, containing right-handed look-at + matrices. + """ + with tf.compat.v1.name_scope(name, "look_at_right_handed", + [camera_position, look_at, up_vector]): + camera_position = tf.convert_to_tensor(value=camera_position) + look_at = tf.convert_to_tensor(value=look_at) + up_vector = tf.convert_to_tensor(value=up_vector) + + shape.check_static( + tensor=camera_position, + tensor_name="camera_position", + has_dim_equals=(-1, 3)) + shape.check_static( + tensor=look_at, tensor_name="look_at", has_dim_equals=(-1, 3)) + shape.check_static( + tensor=up_vector, tensor_name="up_vector", has_dim_equals=(-1, 3)) + shape.compare_batch_dimensions( + tensors=(camera_position, look_at, up_vector), + last_axes=-2, + tensor_names=("camera_position", "look_at", "up_vector"), + broadcast_compatible=False) + + z_axis = tf.linalg.l2_normalize(look_at - camera_position, axis=-1) + horizontal_axis = tf.linalg.l2_normalize( + vector.cross(z_axis, up_vector), axis=-1) + vertical_axis = vector.cross(horizontal_axis, z_axis) + + batch_shape = tf.shape(input=horizontal_axis)[:-1] + zeros = tf.zeros( + shape=tf.concat((batch_shape, (3,)), axis=-1), + dtype=horizontal_axis.dtype) + one = tf.ones( + shape=tf.concat((batch_shape, (1,)), axis=-1), + dtype=horizontal_axis.dtype) + x = tf.concat( + (horizontal_axis, -vector.dot(horizontal_axis, camera_position)), + axis=-1) + y = tf.concat((vertical_axis, -vector.dot(vertical_axis, camera_position)), + axis=-1) + z = tf.concat((-z_axis, vector.dot(z_axis, camera_position)), axis=-1) + w = tf.concat((zeros, one), axis=-1) + return tf.stack((x, y, z, w), axis=-2) + + +def model_to_eye(point_model_space, + camera_position, + look_at, + up_vector, + name=None): + """Transforms points from model to eye coordinates. + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + point_model_space: A tensor of shape `[A1, ..., An, 3]`, where the last + dimension represents the 3D points in model space. + camera_position: A tensor of shape `[A1, ..., An, 3]`, where the last + dimension represents the 3D position of the camera. + look_at: A tensor of shape `[A1, ..., An, 3]`, with the last dimension + storing the position the camera is looking at. + up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension + defines the up vector of the camera. + name: A name for this op. Defaults to 'model_to_eye'. + + Raises: + ValueError: if all the inputs are not of the same shape, or if any input + is of an unsupported shape. + + Returns: + A tensor of shape `[A1, ..., An, 3]`, containing `point_model_space` in eye + coordinates.
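To make the eye-space convention of model_to_eye concrete, here is a minimal illustrative snippet, mirroring the first preset of test_model_to_eye_preset below; with the camera at the origin looking along +z with a +y up vector, the transform flips the x and z axes:

import tensorflow as tf

from tensorflow_graphics.rendering.opengl import math as glm

# Camera at the origin, looking along +z, with +y as the up vector.
point_model_space = ((2.0, 3.0, 4.0),)
camera_position = ((0.0, 0.0, 0.0),)
look_at = ((0.0, 0.0, 1.0),)
up_vector = ((0.0, 1.0, 0.0),)
point_eye = glm.model_to_eye(point_model_space, camera_position, look_at,
                             up_vector)
print(point_eye)  # Approximately (-2.0, 3.0, -4.0).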
+ """ + with tf.compat.v1.name_scope( + name, "model_to_eye", + [point_model_space, camera_position, look_at, up_vector]): + point_model_space = tf.convert_to_tensor(value=point_model_space) + camera_position = tf.convert_to_tensor(value=camera_position) + look_at = tf.convert_to_tensor(value=look_at) + up_vector = tf.convert_to_tensor(value=up_vector) + + shape.check_static( + tensor=point_model_space, + tensor_name="point_model_space", + has_dim_equals=(-1, 3)) + shape.check_static( + tensor=camera_position, + tensor_name="camera_position", + has_dim_equals=(-1, 3)) + shape.check_static( + tensor=look_at, tensor_name="look_at", has_dim_equals=(-1, 3)) + shape.check_static( + tensor=up_vector, tensor_name="up_vector", has_dim_equals=(-1, 3)) + shape.compare_batch_dimensions( + tensors=(point_model_space, camera_position, look_at, up_vector), + last_axes=-2, + tensor_names=("point_model_space", "camera_position", "look_at", + "up_vector"), + broadcast_compatible=False) + + model_to_eye_matrix = look_at_right_handed(camera_position, look_at, + up_vector) + batch_shape = tf.shape(input=point_model_space)[:-1] + one = tf.ones( + shape=tf.concat((batch_shape, (1,)), axis=-1), + dtype=point_model_space.dtype) + point_model_space = tf.concat((point_model_space, one), axis=-1) + point_model_space = tf.expand_dims(point_model_space, axis=-1) + res = tf.squeeze(tf.matmul(model_to_eye_matrix, point_model_space), axis=-1) + return res[..., :-1] + + +def eye_to_clip(point_eye_space, + vertical_field_of_view, + aspect_ratio, + near, + far, + name=None): + """Transforms points from eye to clip space. + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + point_eye_space: A tensor of shape `[A1, ..., An, 3]`, where the last + dimension represents the 3D points in eye coordinates. + vertical_field_of_view: A tensor of shape `[A1, ..., An, 1]`, where the last + dimension represents the vertical field of view of the frustum. Note that + values for `vertical_field_of_view` must be in the range ]0,pi[. + aspect_ratio: A tensor of shape `[A1, ..., An, 1]`, where the last dimension + stores the width over height ratio of the frustum. Note that values for + `aspect_ratio` must be non-negative. + near: A tensor of shape `[A1, ..., An, 1]`, where the last dimension + captures the distance between the viewer and the near clipping plane. Note + that values for `near` must be non-negative. + far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension captures + the distance between the viewer and the far clipping plane. Note that + values for `far` must be non-negative. + name: A name for this op. Defaults to 'eye_to_clip'. + + Raises: + ValueError: If any input is of an unsupported shape. + + Returns: + A tensor of shape `[A1, ..., An, 4]`, containing `point_eye_space` in + homogeneous clip coordinates. 
+ """ + with tf.compat.v1.name_scope( + name, "eye_to_clip", + [point_eye_space, vertical_field_of_view, aspect_ratio, near, far]): + point_eye_space = tf.convert_to_tensor(value=point_eye_space) + vertical_field_of_view = tf.convert_to_tensor(value=vertical_field_of_view) + aspect_ratio = tf.convert_to_tensor(value=aspect_ratio) + near = tf.convert_to_tensor(value=near) + far = tf.convert_to_tensor(value=far) + + shape.check_static( + tensor=point_eye_space, + tensor_name="point_eye_space", + has_dim_equals=(-1, 3)) + shape.check_static( + tensor=vertical_field_of_view, + tensor_name="vertical_field_of_view", + has_dim_equals=(-1, 1)) + shape.check_static( + tensor=aspect_ratio, tensor_name="aspect_ratio", has_dim_equals=(-1, 1)) + shape.check_static(tensor=near, tensor_name="near", has_dim_equals=(-1, 1)) + shape.check_static(tensor=far, tensor_name="far", has_dim_equals=(-1, 1)) + shape.compare_batch_dimensions( + tensors=(point_eye_space, vertical_field_of_view, aspect_ratio, near, + far), + last_axes=-2, + tensor_names=("point_eye_space", "vertical_field_of_view", + "aspect_ratio", "near", "far"), + broadcast_compatible=True) + + perspective_matrix = perspective_rh(vertical_field_of_view, aspect_ratio, + near, far) + perspective_matrix = tf.squeeze(perspective_matrix, axis=-3) + batch_shape = tf.shape(input=point_eye_space)[:-1] + one = tf.ones( + shape=tf.concat((batch_shape, (1,)), axis=-1), + dtype=point_eye_space.dtype) + point_eye_space = tf.concat((point_eye_space, one), axis=-1) + point_eye_space = tf.expand_dims(point_eye_space, axis=-1) + + return tf.squeeze(tf.matmul(perspective_matrix, point_eye_space), axis=-1) + + +def clip_to_ndc(point_clip_space, name=None): + """Transforms points from clip to normalized device coordinates (ndc). + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + point_clip_space: A tensor of shape `[A1, ..., An, 4]`, where the last + dimension represents points in clip space. + name: A name for this op. Defaults to 'clip_to_ndc'. + + Raises: + ValueError: If `point_clip_space` is not of size 4 in its last dimension. + + Returns: + A tensor of shape `[A1, ..., An, 3]`, containing `point_clip_space` in + normalized device coordinates. + """ + with tf.compat.v1.name_scope(name, "clip_to_ndc", [point_clip_space]): + point_clip_space = tf.convert_to_tensor(value=point_clip_space) + + shape.check_static( + tensor=point_clip_space, + tensor_name="point_clip_space", + has_dim_equals=(-1, 4)) + + w = point_clip_space[..., -1] + w = tf.expand_dims(w, axis=-1) + return point_clip_space[..., :3] / w + + +def ndc_to_screen(point_ndc_space, + lower_left_corner, + screen_dimensions, + near, + far, + name=None): + """Transforms points from normalized device coordinates to screen coordinates. + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + point_ndc_space: A tensor of shape `[A1, ..., An, 3]`, where the last + dimension represents points in normalized device coordinates. + lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last + dimension captures the position (in pixels) of the lower left corner of + the screen. + screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last + dimension is expressed in pixels and captures the width and the height (in + pixels) of the screen. + near: A tensor of shape `[A1, ..., An, 1]`, where the last dimension + captures the distance between the viewer and the near clipping plane. Note + that values for `near` must be non-negative. 
+ far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension + captures the distance between the viewer and the far clipping plane. Note + that values for `far` must be greater than those of `near`. + name: A name for this op. Defaults to 'ndc_to_screen'. + + Raises: + InvalidArgumentError: if any input contains data not in the specified range + of valid values. + ValueError: If any input is of an unsupported shape. + + Returns: + A tensor of shape `[A1, ..., An, 3]`, containing `point_ndc_space` in + screen coordinates (pixels), with the last coordinate holding depth in the + [near, far] range. + """ + with tf.compat.v1.name_scope( + name, "ndc_to_screen", + [point_ndc_space, lower_left_corner, screen_dimensions, near, far]): + point_ndc_space = tf.convert_to_tensor(value=point_ndc_space) + lower_left_corner = tf.convert_to_tensor(value=lower_left_corner) + screen_dimensions = tf.convert_to_tensor(value=screen_dimensions) + near = tf.convert_to_tensor(value=near) + far = tf.convert_to_tensor(value=far) + + shape.check_static( + tensor=point_ndc_space, + tensor_name="point_ndc_space", + has_dim_equals=(-1, 3)) + shape.check_static( + tensor=lower_left_corner, + tensor_name="lower_left_corner", + has_dim_equals=(-1, 2)) + shape.check_static( + tensor=screen_dimensions, + tensor_name="screen_dimensions", + has_dim_equals=(-1, 2)) + shape.check_static(tensor=near, tensor_name="near", has_dim_equals=(-1, 1)) + shape.check_static(tensor=far, tensor_name="far", has_dim_equals=(-1, 1)) + + shape.compare_batch_dimensions( + tensors=(point_ndc_space, lower_left_corner, screen_dimensions, near, + far), + last_axes=-2, + tensor_names=("point_ndc_space", "lower_left_corner", + "screen_dimensions", "near", "far"), + broadcast_compatible=False) + + screen_dimensions = asserts.assert_all_above( + screen_dimensions, 0.0, open_bound=True) + near = asserts.assert_all_above(near, 0.0, open_bound=True) + far = asserts.assert_all_above(far, near, open_bound=True) + + ndc_to_screen_factor = tf.concat( + (screen_dimensions, far - near), axis=-1) / 2.0 + screen_center = tf.concat( + (lower_left_corner + screen_dimensions / 2.0, (near + far) / 2.0), + axis=-1) + return ndc_to_screen_factor * point_ndc_space + screen_center + + +def model_to_screen(point_model_space, + camera_position, + look_at, + up_vector, + vertical_field_of_view, + screen_dimensions, + near, + far, + lower_left_corner, + name=None): + """Transforms points from model to screen coordinates. + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + point_model_space: A tensor of shape `[A1, ..., An, 3]`, where the last + dimension represents the 3D points in model space. + camera_position: A tensor of shape `[A1, ..., An, 3]`, where the last + dimension represents the 3D position of the camera. + look_at: A tensor of shape `[A1, ..., An, 3]`, with the last dimension + storing the position the camera is looking at. + up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension + defines the up vector of the camera. + vertical_field_of_view: A tensor of shape `[A1, ..., An, 1]`, where the last + dimension represents the vertical field of view of the frustum. Note that + values for `vertical_field_of_view` must be in the range ]0,pi[. + screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last + dimension captures the width and the height (in pixels) of the screen. + near: A tensor of shape `[A1, ..., An, 1]`, where the last dimension + captures the distance between the viewer and the near clipping plane.
Note + that values for `near` must be strictly positive. + far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension + captures the distance between the viewer and the far clipping plane. Note + that values for `far` must be greater than those of `near`. + lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last + dimension captures the position (in pixels) of the lower left corner of + the screen. + name: A name for this op. Defaults to 'model_to_screen'. + + Raises: + InvalidArgumentError: if any input contains data not in the specified range + of valid values. + ValueError: If any input is of an unsupported shape. + + Returns: + A tuple of two tensors. The first, of shape `[A1, ..., An, 3]`, contains + the projection of `point_model_space` in screen coordinates (pixels); the + second, of shape `[A1, ..., An, 1]`, contains the `w` component of + `point_model_space` in clip space. + """ + with tf.compat.v1.name_scope(name, "model_to_screen", [ + point_model_space, camera_position, look_at, up_vector, + vertical_field_of_view, screen_dimensions, near, far, lower_left_corner + ]): + screen_dimensions = tf.convert_to_tensor(value=screen_dimensions) + shape.check_static( + tensor=screen_dimensions, + tensor_name="screen_dimensions", + has_dim_equals=(-1, 2)) + + point_eye_space = model_to_eye(point_model_space, camera_position, look_at, + up_vector) + point_clip_space = eye_to_clip( + point_eye_space, vertical_field_of_view, + screen_dimensions[..., 0:1] / screen_dimensions[..., 1:2], near, far) + point_ndc_space = clip_to_ndc(point_clip_space) + point_screen_space = ndc_to_screen(point_ndc_space, lower_left_corner, + screen_dimensions, near, far) + return point_screen_space, point_clip_space[..., 3:4] + + +# API contains all public functions and classes. +__all__ = export_api.get_functions_and_classes() diff --git a/tensorflow_graphics/rendering/opengl/tests/math_test.py b/tensorflow_graphics/rendering/opengl/tests/math_test.py new file mode 100644 index 000000000..a25ce9a9d --- /dev/null +++ b/tensorflow_graphics/rendering/opengl/tests/math_test.py @@ -0,0 +1,829 @@ +#Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
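Before the tests: the two remaining transforms are simple enough to check by hand against the presets below. clip_to_ndc is the perspective divide, and ndc_to_screen is an affine remap of the [-1, 1] NDC cube onto the viewport and the [near, far] depth range. A small NumPy reference sketch (the helper names are my own, for illustration):

import numpy as np

def clip_to_ndc_ref(point_clip):
  # Perspective divide: divide x, y, z by the homogeneous w component.
  point_clip = np.asarray(point_clip)
  return point_clip[..., :3] / point_clip[..., 3:4]

def ndc_to_screen_ref(point_ndc, lower_left_corner, screen_dimensions, near,
                      far):
  # Affine remap of [-1, 1]^3 onto the viewport and the [near, far] range.
  factor = np.concatenate((screen_dimensions, far - near), axis=-1) / 2.0
  center = np.concatenate(
      (lower_left_corner + screen_dimensions / 2.0, (near + far) / 2.0),
      axis=-1)
  return factor * np.asarray(point_ndc) + center

print(clip_to_ndc_ref((4.0, 8.0, 16.0, 2.0)))  # -> [2. 4. 8.]
print(ndc_to_screen_ref(
    np.array((1.1, 2.2, 3.3)), np.array((6.4, 4.8)),
    np.array((640.0, 480.0)), np.array((1.0,)), np.array((10.0,))))
# -> [678.4 772.8 20.35], matching test_ndc_to_screen_preset.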
+"""Tests for OpenGL math routines.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow_graphics.rendering.opengl import math as glm +from tensorflow_graphics.util import test_case + + +class MathTest(test_case.TestCase): + + def test_perspective_rh_preset(self): + """Tests that perspective_rh generates expected results..""" + vertical_field_of_view = (60.0 * math.pi / 180.0, 50.0 * math.pi / 180.0) + aspect_ratio = (1.5, 1.1) + near = (1.0, 1.2) + far = (10.0, 5.0) + + pred = glm.perspective_rh(vertical_field_of_view, aspect_ratio, near, far) + gt = (((1.15470052, 0.0, 0.0, 0.0), (0.0, 1.73205066, 0.0, 0.0), + (0.0, 0.0, -1.22222221, -2.22222233), (0.0, 0.0, -1.0, 0.0)), + ((1.9495517, 0.0, 0.0, 0.0), (0.0, 2.14450693, 0.0, 0.0), + (0.0, 0.0, -1.63157892, -3.15789485), (0.0, 0.0, -1.0, 0.0))) + self.assertAllClose(pred, gt) + + @parameterized.parameters( + ((1,), (1,), (1,), (1,)), + ((None, 2), (None, 2), (None, 2), (None, 2)), + ) + def test_perspective_rh_exception_not_raised(self, *shapes): + """Tests that the shape exceptions are not raised.""" + self.assert_exception_is_not_raised(glm.perspective_rh, shapes) + + @parameterized.parameters( + ("Not all batch dimensions are identical", (3,), (3, 3), (3, 3), (3, 3)), + ("Not all batch dimensions are identical", (2, 3), (3, 3), (3, 3), + (3, 3)), + ) + def test_perspective_rh_exception_raised(self, error_msg, *shapes): + """Tests that the shape exceptions are properly raised.""" + self.assert_exception_is_raised(glm.perspective_rh, error_msg, shapes) + + def test_perspective_rh_exception_near_raised(self): + """Tests that an exception is raised when `near` is not strictly positive.""" + vertical_field_of_view = np.array((1.0,)) + aspect_ratio = np.array((1.0,)) + near = np.random.uniform(-1.0, 0.0, size=(1,)) + far = np.array((1.0,)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, aspect_ratio, near, far)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, aspect_ratio, + np.array((0.0,)), far)) + + def test_perspective_rh_exception_far_raised(self): + """Tests that an exception is raised when `far` is not strictly positive.""" + vertical_field_of_view = np.array((1.0,)) + aspect_ratio = np.array((1.0,)) + near = np.array((1.0,)) + far = np.random.uniform(-1.0, 0.0, size=(1,)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, aspect_ratio, near, far)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, aspect_ratio, near, + np.array((0.0,)))) + + def test_perspective_rh_exception_aspect_ratio_raised(self): + """Tests that an exception is raised when `aspect_ratio` is not strictly positive.""" + vertical_field_of_view = np.array((1.0,)) + aspect_ratio = np.random.uniform(-1.0, 0.0, size=(1,)) + near = np.array((1.0,)) + far = np.array((1.0,)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, aspect_ratio, near, far)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, np.array((0.0,)), near, + far)) + + def 
test_perspective_rh_exception_vertical_field_of_view_raised(self): + """Tests that an exception is raised when `vertical_field_of_view` has unexpected values.""" + vertical_field_of_view = np.random.uniform(-math.pi, 0.0, size=(1,)) + aspect_ratio = np.array((1.0,)) + near = np.array((1.0,)) + far = np.array((1.0,)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, aspect_ratio, near, far)) + + vertical_field_of_view = np.random.uniform( + math.pi, 2.0 * math.pi, size=(1,)) + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(vertical_field_of_view, aspect_ratio, near, far)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(np.array((0.0,)), aspect_ratio, near, far)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.perspective_rh(np.array((math.pi,)), aspect_ratio, near, far)) + + def test_perspective_rh_jacobian_preset(self): + """Tests the Jacobian of perspective_rh.""" + vertical_field_of_view_init = np.array((1.0,)) + aspect_ratio_init = np.array((1.0,)) + near_init = np.array((1.0,)) + far_init = np.array((10.0,)) + + vertical_field_of_view_tensor = tf.identity( + tf.convert_to_tensor(value=vertical_field_of_view_init)) + aspect_ratio_tensor = tf.identity( + tf.convert_to_tensor(value=aspect_ratio_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + + y = glm.perspective_rh(vertical_field_of_view_tensor, aspect_ratio_tensor, + near_tensor, far_tensor) + + self.assert_jacobian_is_correct(vertical_field_of_view_tensor, + vertical_field_of_view_init, y) + self.assert_jacobian_is_correct(aspect_ratio_tensor, aspect_ratio_init, y) + self.assert_jacobian_is_correct(near_tensor, near_init, y) + self.assert_jacobian_is_correct(far_tensor, far_init, y) + + def test_perspective_rh_cross_jacobian_random(self): + """Tests the Jacobian of perspective_rh.""" + tensor_size = np.random.randint(1, 3) + tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() + eps = np.finfo(np.float64).eps + vertical_field_of_view_init = np.random.uniform( + eps, math.pi - eps, size=tensor_shape) + aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape) + near_init = np.random.uniform(eps, 100.0, size=tensor_shape) + far_init = np.random.uniform(eps, 100.0, size=tensor_shape) + + vertical_field_of_view_tensor = tf.identity( + tf.convert_to_tensor(value=vertical_field_of_view_init)) + aspect_ratio_tensor = tf.identity( + tf.convert_to_tensor(value=aspect_ratio_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + + y = glm.perspective_rh(vertical_field_of_view_tensor, aspect_ratio_tensor, + near_tensor, far_tensor) + + self.assert_jacobian_is_correct(vertical_field_of_view_tensor, + vertical_field_of_view_init, y) + self.assert_jacobian_is_correct(aspect_ratio_tensor, aspect_ratio_init, y) + self.assert_jacobian_is_correct(near_tensor, near_init, y) + self.assert_jacobian_is_correct(far_tensor, far_init, y) + + def test_look_at_right_handed_preset(self): + """Tests that look_at_right_handed generates expected results.""" + camera_position = ((0.0, 0.0, 0.0), (0.1, 0.2, 0.3)) + look_at = ((0.0, 0.0, 1.0), (0.4, 0.5, 0.6)) + up_vector = ((0.0, 1.0, 0.0), (0.7, 0.8, 0.9)) + + pred = glm.look_at_right_handed(camera_position,
look_at, up_vector) + gt = (((-1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0), + (0.0, 0.0, 0.0, 1.0)), + ((4.08248186e-01, -8.16496551e-01, 4.08248395e-01, -2.98023224e-08), + (-7.07106888e-01, 1.19209290e-07, 7.07106769e-01, -1.41421378e-01), + (-5.77350318e-01, -5.77350318e-01, -5.77350318e-01, + 3.46410215e-01), (0.0, 0.0, 0.0, 1.0))) + self.assertAllClose(pred, gt) + + @parameterized.parameters( + ((3,), (3,), (3,)), + ((None, 3), (None, 3), (None, 3)), + ((None, 2, 3), (None, 2, 3), (None, 2, 3)), + ) + def test_look_at_right_handed_exception_not_raised(self, *shapes): + """Tests that the shape exceptions are not raised.""" + self.assert_exception_is_not_raised(glm.look_at_right_handed, shapes) + + @parameterized.parameters( + ("must have exactly 3 dimensions in axis -1", (2,), (3,), (3,)), + ("must have exactly 3 dimensions in axis -1", (3,), (2,), (3,)), + ("must have exactly 3 dimensions in axis -1", (3,), (3,), (1,)), + ("Not all batch dimensions are identical", (3,), (3, 3), (3, 3)), + ) + def test_look_at_right_handed_exception_raised(self, error_msg, *shapes): + """Tests that the shape exceptions are properly raised.""" + self.assert_exception_is_raised(glm.look_at_right_handed, error_msg, shapes) + + def test_look_at_right_handed_jacobian_preset(self): + """Tests the Jacobian of look_at_right_handed.""" + camera_position_init = np.array(((0.0, 0.0, 0.0), (0.1, 0.2, 0.3))) + look_at_init = np.array(((0.0, 0.0, 1.0), (0.4, 0.5, 0.6))) + up_vector_init = np.array(((0.0, 1.0, 0.0), (0.7, 0.8, 0.9))) + camera_position_tensor = tf.convert_to_tensor(value=camera_position_init) + look_at_tensor = tf.convert_to_tensor(value=look_at_init) + up_vector_tensor = tf.convert_to_tensor(value=up_vector_init) + y = glm.look_at_right_handed(camera_position_tensor, look_at_tensor, + up_vector_tensor) + + self.assert_jacobian_is_correct(camera_position_tensor, + camera_position_init, y) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y) + self.assert_jacobian_is_correct(up_vector_tensor, up_vector_init, y) + + def test_look_at_right_handed_jacobian_random(self): + """Tests the Jacobian of look_at_right_handed.""" + tensor_size = np.random.randint(1, 3) + tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() + camera_position_init = np.random.uniform(size=tensor_shape + [3]) + look_at_init = np.random.uniform(size=tensor_shape + [3]) + up_vector_init = np.random.uniform(size=tensor_shape + [3]) + camera_position_tensor = tf.convert_to_tensor(value=camera_position_init) + look_at_tensor = tf.convert_to_tensor(value=look_at_init) + up_vector_tensor = tf.convert_to_tensor(value=up_vector_init) + y = glm.look_at_right_handed(camera_position_tensor, look_at_tensor, + up_vector_tensor) + + self.assert_jacobian_is_correct(camera_position_tensor, + camera_position_init, y) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y) + self.assert_jacobian_is_correct(up_vector_tensor, up_vector_init, y) + + def test_model_to_eye_preset(self): + """Tests that model_to_eye generates expected results.""" + point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)) + camera_position = ((0.0, 0.0, 0.0), (0.1, 0.2, 0.3)) + look_at = ((0.0, 0.0, 1.0), (0.4, 0.5, 0.6)) + up_vector = ((0.0, 1.0, 0.0), (0.7, 0.8, 0.9)) + + pred = glm.model_to_eye(point, camera_position, look_at, up_vector) + gt = ((-2.0, 3.0, -4.0), (2.08616257e-07, 1.27279234, -6.58179379)) + self.assertAllClose(pred, gt) + + @parameterized.parameters( + ((3,), (3,), (3,), (3,)), + ((None, 3), (None, 3),
(None, 3), (None, 3)), + ((None, 2, 3), (None, 2, 3), (None, 2, 3), (None, 2, 3)), + ) + def test_model_to_eye_exception_not_raised(self, *shapes): + """Tests that the shape exceptions are not raised.""" + self.assert_exception_is_not_raised(glm.model_to_eye, shapes) + + @parameterized.parameters( + ("must have exactly 3 dimensions in axis -1", (2,), (3,), (3,), (3,)), + ("must have exactly 3 dimensions in axis -1", (3,), (2,), (3,), (3,)), + ("must have exactly 3 dimensions in axis -1", (3,), (3,), (2,), (3,)), + ("must have exactly 3 dimensions in axis -1", (3,), (3,), (3,), (2,)), + ("Not all batch dimensions are identical", (3,), (3, 3), (3, 3), (3, 3)), + ) + def test_model_to_eye_exception_raised(self, error_msg, *shapes): + """Tests that the shape exceptions are properly raised.""" + self.assert_exception_is_raised(glm.model_to_eye, error_msg, shapes) + + def test_model_to_eye_jacobian_preset(self): + """Tests the Jacobian of model_to_eye.""" + point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))) + camera_position_init = np.array(((0.0, 0.0, 0.0), (0.1, 0.2, 0.3))) + look_at_init = np.array(((0.0, 0.0, 1.0), (0.4, 0.5, 0.6))) + up_vector_init = np.array(((0.0, 1.0, 0.0), (0.7, 0.8, 0.9))) + point_tensor = tf.convert_to_tensor(value=point_init) + camera_position_tensor = tf.convert_to_tensor(value=camera_position_init) + look_at_tensor = tf.convert_to_tensor(value=look_at_init) + up_vector_tensor = tf.convert_to_tensor(value=up_vector_init) + y = glm.model_to_eye(point_tensor, camera_position_tensor, look_at_tensor, + up_vector_tensor) + + self.assert_jacobian_is_correct(point_tensor, point_init, y) + self.assert_jacobian_is_correct(camera_position_tensor, + camera_position_init, y) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y) + self.assert_jacobian_is_correct(up_vector_tensor, up_vector_init, y) + + def test_model_to_eye_jacobian_random(self): + """Tests the Jacobian of model_to_eye.""" + tensor_size = np.random.randint(1, 3) + tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() + point_init = np.random.uniform(size=tensor_shape + [3]) + camera_position_init = np.random.uniform(size=tensor_shape + [3]) + look_at_init = np.random.uniform(size=tensor_shape + [3]) + up_vector_init = np.random.uniform(size=tensor_shape + [3]) + point_tensor = tf.convert_to_tensor(value=point_init) + camera_position_tensor = tf.convert_to_tensor(value=camera_position_init) + look_at_tensor = tf.convert_to_tensor(value=look_at_init) + up_vector_tensor = tf.convert_to_tensor(value=up_vector_init) + y = glm.model_to_eye(point_tensor, camera_position_tensor, look_at_tensor, + up_vector_tensor) + + self.assert_jacobian_is_correct(point_tensor, point_init, y) + self.assert_jacobian_is_correct(camera_position_tensor, + camera_position_init, y) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y) + self.assert_jacobian_is_correct(up_vector_tensor, up_vector_init, y) + + def test_eye_to_clip_preset(self): + """Tests that eye_to_clip generates expected results.""" + point = ((2.0, 3.0, 4.0), (3.0, 4.0, 5.0)) + fov = ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,)) + aspect_ratio = ((1.5,), (1.6,)) + near_plane = ((1.0,), (2.0,)) + far_plane = ((10.0,), (11.0,)) + + pred = glm.eye_to_clip(point, fov, aspect_ratio, near_plane, far_plane) + gt = ((2.30940104, 5.19615173, -7.11111116, -4.0), (4.02095032, 8.57802773, + -12.11111069, -5.0)) + self.assertAllClose(pred, gt) + + @parameterized.parameters( + ((3,), (1,), (1,), (1,), (1,)), + ((None, 3), (None, 
1), (None, 1), (None, 1), (None, 1)), + ((None, 5, 3), (None, 5, 1), (None, 5, 1), (None, 5, 1), (None, 5, 1)), + ) + def test_eye_to_clip_exception_not_raised(self, *shapes): + """Tests that the shape exceptions are not raised.""" + self.assert_exception_is_not_raised(glm.eye_to_clip, shapes) + + @parameterized.parameters( + ("must have exactly 3 dimensions in axis -1", (2,), (1,), (1,), (1,), + (1,)), + ("must have exactly 1 dimensions in axis -1", (3,), (2,), (1,), (1,), + (1,)), + ("must have exactly 1 dimensions in axis -1", (3,), (1,), (2,), (1,), + (1,)), + ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (2,), + (1,)), + ("must have exactly 1 dimensions in axis -1", (3,), (1,), (1,), (1,), + (2,)), + ("Not all batch dimensions are broadcast-compatible", (3, 3), (2, 1), + (1,), (1,), (1,)), + ) + def test_eye_to_clip_exception_raised(self, error_msg, *shapes): + """Tests that the shape exceptions are properly raised.""" + self.assert_exception_is_raised(glm.eye_to_clip, error_msg, shapes) + + def test_eye_to_clip_jacobian_preset(self): + """Tests the Jacobian of eye_to_clip.""" + point_init = np.array(((2.0, 3.0, 4.0), (3.0, 4.0, 5.0))) + vertical_field_of_view_init = np.array( + ((60.0 * math.pi / 180.0,), (50.0 * math.pi / 180.0,))) + aspect_ratio_init = np.array(((1.5,), (1.6,))) + near_init = np.array(((1.0,), (2.0,))) + far_init = np.array(((10.0,), (11.0,))) + + point_tensor = tf.convert_to_tensor(value=point_init) + vertical_field_of_view_tensor = tf.identity( + tf.convert_to_tensor(value=vertical_field_of_view_init)) + aspect_ratio_tensor = tf.identity( + tf.convert_to_tensor(value=aspect_ratio_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + y = glm.eye_to_clip(point_tensor, vertical_field_of_view_tensor, + aspect_ratio_tensor, near_tensor, far_tensor) + + self.assert_jacobian_is_correct(point_tensor, point_init, y) + self.assert_jacobian_is_correct( + vertical_field_of_view_tensor, + vertical_field_of_view_init, + y, + atol=1e-5) + self.assert_jacobian_is_correct( + aspect_ratio_tensor, aspect_ratio_init, y, atol=1e-5) + self.assert_jacobian_is_correct(near_tensor, near_init, y, atol=1e-5) + self.assert_jacobian_is_correct(far_tensor, far_init, y, atol=1e-5) + + def test_eye_to_clip_jacobian_random(self): + """Tests the Jacobian of eye_to_clip.""" + tensor_size = np.random.randint(1, 3) + tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() + point_init = np.random.uniform(size=tensor_shape + [3]) + eps = np.finfo(np.float64).eps + vertical_field_of_view_init = np.random.uniform( + eps, math.pi - eps, size=tensor_shape + [1]) + aspect_ratio_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) + near_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) + far_init = np.random.uniform(eps, 100.0, size=tensor_shape + [1]) + + point_tensor = tf.convert_to_tensor(value=point_init) + vertical_field_of_view_tensor = tf.identity( + tf.convert_to_tensor(value=vertical_field_of_view_init)) + aspect_ratio_tensor = tf.identity( + tf.convert_to_tensor(value=aspect_ratio_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + + y = glm.eye_to_clip(point_tensor, vertical_field_of_view_tensor, + aspect_ratio_tensor, near_tensor, far_tensor) + + self.assert_jacobian_is_correct(point_tensor, point_init, y) + self.assert_jacobian_is_correct( + 
vertical_field_of_view_tensor, + vertical_field_of_view_init, + y, + atol=1e-5) + self.assert_jacobian_is_correct( + aspect_ratio_tensor, aspect_ratio_init, y, atol=1e-5) + self.assert_jacobian_is_correct(near_tensor, near_init, y, atol=1e-5) + self.assert_jacobian_is_correct(far_tensor, far_init, y, atol=1e-5) + + def test_clip_to_ndc_preset(self): + """Tests that clip_to_ndc generates expected results.""" + point = ((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0)) + pred = glm.clip_to_ndc(point) + gt = ((2.0, 4.0, 8.0), (4.0, 8.0, 16.0)) + self.assertAllClose(pred, gt) + + @parameterized.parameters( + ((4,)), + ((None, 4),), + ((None, 5, 4),), + ) + def test_clip_to_ndc_exception_not_raised(self, *shapes): + """Tests that the shape exceptions are not raised.""" + self.assert_exception_is_not_raised(glm.clip_to_ndc, shapes) + + @parameterized.parameters( + ("must have exactly 4 dimensions in axis -1", (2,)),) + def test_clip_to_ndc_exception_raised(self, error_msg, *shapes): + """Tests that the shape exceptions are properly raised.""" + self.assert_exception_is_raised(glm.clip_to_ndc, error_msg, shapes) + + def test_clip_to_ndc_jacobian_preset(self): + """Tests the Jacobian of clip_to_ndc.""" + point_init = np.array(((4.0, 8.0, 16.0, 2.0), (4.0, 8.0, 16.0, 1.0))) + point_tensor = tf.convert_to_tensor(value=point_init) + y = glm.clip_to_ndc(point_tensor) + self.assert_jacobian_is_correct(point_tensor, point_init, y) + + def test_clip_to_ndc_jacobian_random(self): + """Tests the Jacobian of clip_to_ndc.""" + tensor_size = np.random.randint(1, 3) + tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() + point_init = np.random.uniform(size=tensor_shape + [4]) + point_tensor = tf.convert_to_tensor(value=point_init) + y = glm.clip_to_ndc(point_tensor) + self.assert_jacobian_is_correct(point_tensor, point_init, y) + + def test_ndc_to_screen_preset(self): + """Tests that ndc_to_screen generates expected results.""" + point = ((1.1, 2.2, 3.3), (5.1, 5.2, 5.3)) + lower_left_corner = ((6.4, 4.8), (0.0, 0.0)) + screen_dimensions = ((640.0, 480.0), (300.0, 400.0)) + near = ((1.0,), (11.0,)) + far = ((10.0,), (100.0,)) + pred = glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, + far) + gt = ((678.40002441, 772.79998779, 20.34999847), (915.0, 1240.0, + 291.3500061)) + self.assertAllClose(pred, gt) + + @parameterized.parameters( + ((3,), (2,), (2,), (1,), (1,)), + ((None, 3), (None, 2), (None, 2), (None, 1), (None, 1)), + ((None, 5, 3), (None, 5, 2), (None, 5, 2), (None, 5, 1), (None, 5, 1)), + ) + def test_ndc_to_screen_exception_not_raised(self, *shapes): + """Tests that the shape exceptions are not raised.""" + self.assert_exception_is_not_raised(glm.ndc_to_screen, shapes) + + @parameterized.parameters( + ("must have exactly 3 dimensions in axis -1", (2,), (2,), (2,), (1,), + (1,)), + ("must have exactly 2 dimensions in axis -1", (3,), (1,), (2,), (1,), + (1,)), + ("must have exactly 2 dimensions in axis -1", (3,), (2,), (3,), (1,), + (1,)), + ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (2,), + (1,)), + ("must have exactly 1 dimensions in axis -1", (3,), (2,), (2,), (1,), + (3,)), + ("Not all batch dimensions are identical", (1, 3), (2,), (2,), (1,), + (1,)), + ) + def test_ndc_to_screen_exception_raised(self, error_msg, *shapes): + """Tests that the shape exceptions are properly raised.""" + self.assert_exception_is_raised(glm.ndc_to_screen, error_msg, shapes) + + def test_ndc_to_screen_exception_near_raised(self): + """Tests that an exception is 
raised when `near` is not strictly positive.""" + + point = np.random.uniform(size=(3,)) + lower_left_corner = np.random.uniform(size=(2,)) + screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,)) + near = np.random.uniform(-1.0, 0.0, size=(1,)) + far = np.random.uniform(1.0, 2.0, size=(1,)) + + with self.subTest("negative_near"): + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, + far)) + + with self.subTest("zero_near"): + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, + np.array((0.0,)), far)) + + def test_ndc_to_screen_exception_far_raised(self): + """Tests that an exception is raised if `far` is not greater than `near`.""" + point = np.random.uniform(size=(3,)) + lower_left_corner = np.random.uniform(size=(2,)) + screen_dimensions = np.random.uniform(1.0, 2.0, size=(2,)) + near = np.random.uniform(1.0, 10.0, size=(1,)) + far = near + np.random.uniform(-1.0, 0.0, size=(1,)) + + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, + far)) + + def test_ndc_to_screen_exception_screen_dimensions_raised(self): + """Tests that an exception is raised when `screen_dimensions` is not strictly positive.""" + point = np.random.uniform(size=(3,)) + lower_left_corner = np.random.uniform(size=(2,)) + screen_dimensions = np.random.uniform(-1.0, 0.0, size=(2,)) + near = np.random.uniform(1.0, 10.0, size=(1,)) + far = near + np.random.uniform(0.1, 1.0, size=(1,)) + + with self.subTest("negative_screen_dimensions"): + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.ndc_to_screen(point, lower_left_corner, screen_dimensions, near, + far)) + + with self.subTest("zero_screen_dimensions"): + with self.assertRaises(tf.errors.InvalidArgumentError): + self.evaluate( + glm.ndc_to_screen(point, lower_left_corner, np.array((0.0, 0.0)), + near, far)) + + def test_ndc_to_screen_jacobian_preset(self): + """Tests the Jacobian of ndc_to_screen.""" + point_init = np.array(((1.1, 2.2, 3.3), (5.1, 5.2, 5.3))) + lower_left_corner_init = np.array(((6.4, 4.8), (0.0, 0.0))) + screen_dimensions_init = np.array(((640.0, 480.0), (300.0, 400.0))) + near_init = np.array(((1.0,), (11.0,))) + far_init = np.array(((10.0,), (100.0,))) + + point_tensor = tf.convert_to_tensor(value=point_init) + lower_left_corner_tensor = tf.convert_to_tensor( + value=lower_left_corner_init) + screen_dimensions_tensor = tf.identity( + tf.convert_to_tensor(value=screen_dimensions_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + + y = glm.ndc_to_screen(point_tensor, lower_left_corner_tensor, + screen_dimensions_tensor, near_tensor, far_tensor) + self.assert_jacobian_is_correct(point_tensor, point_init, y) + self.assert_jacobian_is_correct(lower_left_corner_tensor, + lower_left_corner_init, y) + self.assert_jacobian_is_correct(screen_dimensions_tensor, + screen_dimensions_init, y) + self.assert_jacobian_is_correct(near_tensor, near_init, y) + self.assert_jacobian_is_correct(far_tensor, far_init, y) + + def test_ndc_to_screen_jacobian_random(self): + """Tests the Jacobian of ndc_to_screen.""" + tensor_size = np.random.randint(1, 3) + tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() + point_init = np.random.uniform(size=tensor_shape + [3]) + 
lower_left_corner_init = np.random.uniform(size=tensor_shape + [2]) + screen_dimensions_init = np.random.uniform( + 1.0, 1000.0, size=tensor_shape + [2]) + near_init = np.random.uniform(1.0, 10.0, size=tensor_shape + [1]) + far_init = near_init + np.random.uniform(0.1, 1.0, size=(1,)) + + point_tensor = tf.convert_to_tensor(value=point_init) + lower_left_corner_tensor = tf.convert_to_tensor( + value=lower_left_corner_init) + screen_dimensions_tensor = tf.identity( + tf.convert_to_tensor(value=screen_dimensions_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + + y = glm.ndc_to_screen(point_tensor, lower_left_corner_tensor, + screen_dimensions_tensor, near_tensor, far_tensor) + self.assert_jacobian_is_correct(point_tensor, point_init, y) + self.assert_jacobian_is_correct(lower_left_corner_tensor, + lower_left_corner_init, y) + self.assert_jacobian_is_correct(screen_dimensions_tensor, + screen_dimensions_init, y) + self.assert_jacobian_is_correct(near_tensor, near_init, y) + self.assert_jacobian_is_correct(far_tensor, far_init, y) + + def test_model_to_screen_preset(self): + """Tests that model_to_screen generates expected results.""" + point_world_space = ((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1)) + camera_position = ((0.0, 0.0, 0.0), (0.4, -0.8, 0.1)) + camera_up = ((0.0, 1.0, 0.0), (0.0, 0.0, 1.0)) + look_at = ((0.0, 0.0, 1.0), (0.0, 1.0, 0.0)) + vertical_field_of_view = ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,)) + lower_left_corner = ((0.0, 0.0), (10.0, 20.0)) + screen_dimensions = ((501.0, 501.0), (400.0, 600.0)) + near = ((0.01,), (1.0,)) + far = ((4.0,), (3.0,)) + pred_screen, pred_w = glm.model_to_screen(point_world_space, + camera_position, look_at, + camera_up, vertical_field_of_view, + screen_dimensions, near, far, + lower_left_corner) + gt_screen = ((-13.23016357, 599.30444336, 4.00215721), + (98.07017517, -95.40383911, 3.1234405)) + gt_w = ((5.1,), (3.42247,)) + self.assertAllClose(pred_screen, gt_screen) + self.assertAllClose(pred_w, gt_w) + + @parameterized.parameters( + ((3,), (3,), (3,), (3,), (1,), (2,), (1,), (1,), (2,)), + ((None, 3), (None, 3), (None, 3), (None, 3), (None, 1), (None, 2), + (None, 1), (None, 1), (None, 2)), + ((None, 1, 3), (None, 1, 3), (None, 1, 3), (None, 1, 3), (None, 1, 1), + (None, 1, 2), (None, 1, 1), (None, 1, 1), (None, 1, 2)), + ) + def test_model_to_screen_exception_not_raised(self, *shapes): + """Tests that the shape exceptions are not raised.""" + self.assert_exception_is_not_raised(glm.model_to_screen, shapes) + + @parameterized.parameters( + ("must have exactly 3 dimensions in axis -1", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (2,), (3,), (3,), (3,)), + ("must have exactly 3 dimensions in axis -1", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (3,), (2,), (3,), (3,)), + ("must have exactly 3 dimensions in axis -1", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (3,), (3,), (2,), (3,)), + ("must have exactly 3 dimensions in axis -1", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (3,), (3,), (3,), (2,)), + ("must have exactly 1 dimensions in axis -1", (1.0, 1.0), (1.0, 1.0), + (1.0,), (2.0,), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("must have exactly 2 dimensions in axis -1", (1.0,), (1.0, 1.0, 1.0), + (1.0,), (2.0,), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("must have exactly 1 dimensions in axis -1", (1.0,), (1.0, 1.0), + (1.0, 1.0), (2.0,), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("must have exactly 1 dimensions in axis -1", 
(1.0,), (1.0, 1.0), (1.0,), + (2.0, 2.0), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("must have exactly 2 dimensions in axis -1", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0,), (3,), (3,), (3,), (3,)), + ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (2, 3), (3,), (3,), (3,)), + ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (3,), (2, 3), (3,), (3,)), + ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (3,), (3,), (2, 3), (3,)), + ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (3,), (3,), (3,), (2, 3)), + ("Not all batch dimensions are identical", ((1.0,),), (1.0, 1.0), (1.0,), + (2.0,), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("Not all batch dimensions are identical", (1.0,), ((1.0, 1.0),), (1.0,), + (2.0,), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), ((1.0,),), + (2.0,), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,), + ((2.0,),), (5.0, 3.0), (3,), (3,), (3,), (3,)), + ("Not all batch dimensions are identical", (1.0,), (1.0, 1.0), (1.0,), + (2.0,), ((5.0, 3.0),), (3,), (3,), (3,), (3,)), + ) + def test_model_to_screen_exception_raised(self, error_msg, + vertical_field_of_view, + screen_dimensions, near, far, + lower_left_corner, *shapes): + """Tests that the shape exceptions are properly raised.""" + self.assert_exception_is_raised( + func=glm.model_to_screen, + error_msg=error_msg, + shapes=shapes, + vertical_field_of_view=vertical_field_of_view, + screen_dimensions=screen_dimensions, + near=near, + far=far, + lower_left_corner=lower_left_corner) + + def test_model_to_screen_jacobian_preset(self): + """Tests the Jacobian of model_to_screen.""" + point_world_space_init = np.array(((3.1, 4.1, 5.1), (-1.1, 2.2, -3.1))) + camera_position_init = np.array(((0.0, 0.0, 0.0), (0.4, -0.8, 0.1))) + camera_up_init = np.array(((0.0, 1.0, 0.0), (0.0, 0.0, 1.0))) + look_at_init = np.array(((0.0, 0.0, 1.0), (0.0, 1.0, 0.0))) + vertical_field_of_view_init = np.array( + ((60.0 * math.pi / 180.0,), (65 * math.pi / 180,))) + lower_left_corner_init = np.array(((0.0, 0.0), (10.0, 20.0))) + screen_dimensions_init = np.array(((501.0, 501.0), (400.0, 600.0))) + near_init = np.array(((0.01,), (1.0,))) + far_init = np.array(((4.0,), (3.0,))) + + point_world_space_tensor = tf.convert_to_tensor( + value=point_world_space_init) + camera_position_tensor = tf.convert_to_tensor(value=camera_position_init) + camera_up_tensor = tf.convert_to_tensor(value=camera_up_init) + look_at_tensor = tf.convert_to_tensor(value=look_at_init) + vertical_field_of_view_tensor = tf.identity( + tf.convert_to_tensor(value=vertical_field_of_view_init)) + lower_left_corner_tensor = tf.convert_to_tensor( + value=lower_left_corner_init) + screen_dimensions_tensor = tf.identity( + tf.convert_to_tensor(value=screen_dimensions_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + + y_p, y_w = glm.model_to_screen(point_world_space_tensor, + camera_position_tensor, look_at_tensor, + camera_up_tensor, + vertical_field_of_view_tensor, + screen_dimensions_tensor, near_tensor, + far_tensor, lower_left_corner_tensor) + self.assert_jacobian_is_correct(point_world_space_tensor, + point_world_space_init, y_p) + self.assert_jacobian_is_correct(camera_position_tensor, + 
camera_position_init, y_p) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y_p) + self.assert_jacobian_is_correct(camera_up_tensor, camera_up_init, y_p) + self.assert_jacobian_is_correct(vertical_field_of_view_tensor, + vertical_field_of_view_init, y_p) + self.assert_jacobian_is_correct(screen_dimensions_tensor, + screen_dimensions_init, y_p) + self.assert_jacobian_is_correct(near_tensor, near_init, y_p) + self.assert_jacobian_is_correct(far_tensor, far_init, y_p) + self.assert_jacobian_is_correct(lower_left_corner_tensor, + lower_left_corner_init, y_p) + + self.assert_jacobian_is_correct(point_world_space_tensor, + point_world_space_init, y_w) + self.assert_jacobian_is_correct(camera_position_tensor, + camera_position_init, y_w) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y_w) + self.assert_jacobian_is_correct(camera_up_tensor, camera_up_init, y_w) + self.assert_jacobian_is_correct(vertical_field_of_view_tensor, + vertical_field_of_view_init, y_w) + self.assert_jacobian_is_correct(screen_dimensions_tensor, + screen_dimensions_init, y_w) + self.assert_jacobian_is_correct(near_tensor, near_init, y_w) + self.assert_jacobian_is_correct(far_tensor, far_init, y_w) + + def test_model_to_screen_jacobian_random(self): + """Tests the Jacobian of model_to_screen.""" + tensor_size = np.random.randint(1, 3) + tensor_shape = np.random.randint(1, 5, size=(tensor_size)).tolist() + point_world_space_init = np.random.uniform(size=tensor_shape + [3]) + camera_position_init = np.random.uniform(size=tensor_shape + [3]) + camera_up_init = np.random.uniform(size=tensor_shape + [3]) + look_at_init = np.random.uniform(size=tensor_shape + [3]) + vertical_field_of_view_init = np.random.uniform( + 0.1, 1.0, size=tensor_shape + [1]) + lower_left_corner_init = np.random.uniform(size=tensor_shape + [2]) + screen_dimensions_init = np.random.uniform( + 0.1, 1.0, size=tensor_shape + [2]) + near_init = np.random.uniform(0.1, 1.0, size=tensor_shape + [1]) + far_init = near_init + np.random.uniform(0.1, 1.0, size=tensor_shape + [1]) + + point_world_space_tensor = tf.convert_to_tensor( + value=point_world_space_init) + camera_position_tensor = tf.convert_to_tensor(value=camera_position_init) + camera_up_tensor = tf.convert_to_tensor(value=camera_up_init) + look_at_tensor = tf.convert_to_tensor(value=look_at_init) + vertical_field_of_view_tensor = tf.identity( + tf.convert_to_tensor(value=vertical_field_of_view_init)) + lower_left_corner_tensor = tf.convert_to_tensor( + value=lower_left_corner_init) + screen_dimensions_tensor = tf.identity( + tf.convert_to_tensor(value=screen_dimensions_init)) + near_tensor = tf.identity(tf.convert_to_tensor(value=near_init)) + far_tensor = tf.identity(tf.convert_to_tensor(value=far_init)) + + y_p, y_w = glm.model_to_screen(point_world_space_tensor, + camera_position_tensor, look_at_tensor, + camera_up_tensor, + vertical_field_of_view_tensor, + screen_dimensions_tensor, near_tensor, + far_tensor, lower_left_corner_tensor) + self.assert_jacobian_is_correct(point_world_space_tensor, + point_world_space_init, y_p) + self.assert_jacobian_is_correct(camera_position_tensor, + camera_position_init, y_p) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y_p) + self.assert_jacobian_is_correct(camera_up_tensor, camera_up_init, y_p) + self.assert_jacobian_is_correct(vertical_field_of_view_tensor, + vertical_field_of_view_init, y_p) + self.assert_jacobian_is_correct(screen_dimensions_tensor, + screen_dimensions_init, y_p) + 
self.assert_jacobian_is_correct(near_tensor, near_init, y_p) + self.assert_jacobian_is_correct(far_tensor, far_init, y_p) + self.assert_jacobian_is_correct(lower_left_corner_tensor, + lower_left_corner_init, y_p) + + self.assert_jacobian_is_correct(point_world_space_tensor, + point_world_space_init, y_w) + self.assert_jacobian_is_correct(camera_position_tensor, + camera_position_init, y_w) + self.assert_jacobian_is_correct(look_at_tensor, look_at_init, y_w) + self.assert_jacobian_is_correct(camera_up_tensor, camera_up_init, y_w) + self.assert_jacobian_is_correct(vertical_field_of_view_tensor, + vertical_field_of_view_init, y_w) + self.assert_jacobian_is_correct(screen_dimensions_tensor, + screen_dimensions_init, y_w) + self.assert_jacobian_is_correct(near_tensor, near_init, y_w) + self.assert_jacobian_is_correct(far_tensor, far_init, y_w) + + +if __name__ == "__main__": + test_case.main()
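Finally, a minimal end-to-end sketch of the full chain this patch adds (model -> eye -> clip -> NDC -> screen), mirroring the first preset of test_model_to_screen_preset above; it is illustrative only:

import math

from tensorflow_graphics.rendering.opengl import math as glm

# One call runs the whole pipeline and also returns the clip-space w.
point_model_space = ((3.1, 4.1, 5.1),)
camera_position = ((0.0, 0.0, 0.0),)
look_at = ((0.0, 0.0, 1.0),)
up_vector = ((0.0, 1.0, 0.0),)
vertical_field_of_view = ((60.0 * math.pi / 180.0,),)
screen_dimensions = ((501.0, 501.0),)
near = ((0.01,),)
far = ((4.0,),)
lower_left_corner = ((0.0, 0.0),)
point_screen, w = glm.model_to_screen(point_model_space, camera_position,
                                      look_at, up_vector,
                                      vertical_field_of_view,
                                      screen_dimensions, near, far,
                                      lower_left_corner)
print(point_screen)  # Approximately (-13.23, 599.30, 4.002).
print(w)  # Approximately (5.1,): the eye-space depth of the point.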