Migrate tensorflow_graphics/rendering/{kernels, opengl, texture, volumetric} to using TensorFlow 2.

- tf.compat.v1.name_scope      -> tf.name_scope
- tf.compat.v1.dimension_value -> tf.compat.dimension_value
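
A minimal sketch of these two replacements, using a hypothetical helper function (only the tf.compat.v1.name_scope -> tf.name_scope and tf.compat.v1.dimension_value -> tf.compat.dimension_value calls mirror the diff below; everything else is illustrative):

import tensorflow as tf

# TF1 pattern being removed: the default name and the watched tensors are
# passed to tf.compat.v1.name_scope explicitly.
def scale_points_v1(points, factor=2.0, name=None):
  with tf.compat.v1.name_scope(name, "scale_points", [points]):
    points = tf.convert_to_tensor(value=points)
    return points * factor

# TF2 pattern introduced here: the default name moves into the signature and
# tf.name_scope takes a single argument.
def scale_points_v2(points, factor=2.0, name="scale_points"):
  with tf.name_scope(name):
    points = tf.convert_to_tensor(value=points)
    return points * factor

# Shape handling: tf.compat.dimension_value returns an int (or None) whether
# the shape entry is a plain int (TF2 behavior) or a Dimension object (TF1).
def _first_dim(tensor):
  dim = tensor.shape[0]
  return 1 if dim is None else tf.compat.dimension_value(dim)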

Don't replace tf.compat.v1.resource_loader.get_path_to_datafile in rendering/tests as there is no TensorFlow v2 equivalent.

Also add keywords to function arguments, as inserted by the tf_upgrade_v2 utility (see https://www.tensorflow.org/guide/upgrade).
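
As a rough sketch of what that mechanical rewrite looks like (the tensors here are hypothetical; only the a= and value= keywords correspond to changes in the diff below):

import tensorflow as tf

pixels = tf.random.uniform((4, 3, 2))

# Before: first arguments passed positionally.
flipped = tf.transpose(pixels, perm=(1, 0, 2))
as_tensor = tf.convert_to_tensor(pixels)

# After tf_upgrade_v2: the first argument is spelled out by keyword, matching
# the a= and value= insertions in the diff.
flipped = tf.transpose(a=pixels, perm=(1, 0, 2))
as_tensor = tf.convert_to_tensor(value=pixels)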

PiperOrigin-RevId: 379249784
tensorflower-gardener authored and Copybara-Service committed Jun 14, 2021
1 parent e7e624a commit 898ed88
Showing 13 changed files with 169 additions and 212 deletions.
15 changes: 7 additions & 8 deletions tensorflow_graphics/rendering/camera/perspective.py
@@ -45,7 +45,7 @@
from __future__ import print_function

import math
from typing import Optional, Tuple
from typing import Tuple
import tensorflow as tf

from tensorflow_graphics.geometry.representation import grid
@@ -501,7 +501,7 @@ def random_patches(focal: tf.Tensor,
patch_height: int,
patch_width: int,
scale: float = 1.0,
name: Optional[str] = None) -> Tuple[tf.Tensor, tf.Tensor]:
name: str = "random_patches") -> Tuple[tf.Tensor, tf.Tensor]:
"""Sample patches at different scales and from an image.
Args:
@@ -519,8 +519,7 @@ def random_patches(focal: tf.Tensor,
ray directions in 3D passing from the M*N pixels of the patch and
a tensor of shape `[A1, ..., An, M*N, 2]` with the pixel x, y locations.
"""
with tf.compat.v1.name_scope(name, "random_patches",
[focal, principal_point]):
with tf.name_scope(name):
focal = tf.convert_to_tensor(value=focal)
principal_point = tf.convert_to_tensor(value=principal_point)

@@ -541,8 +540,9 @@ def random_patches(focal: tf.Tensor,
[patch_height, patch_width]) # storing is in 'ij'
patch_ij = tf.cast(patch_ij, tf.float32)
patch_ij = patch_ij * scale
interm_shape = tf.concat(
[tf.ones_like(batch_shape), tf.shape(patch_ij)], axis=0)
interm_shape = tf.concat([tf.ones_like(batch_shape),
tf.shape(patch_ij)],
axis=0)
patch_ij = tf.reshape(patch_ij, interm_shape)
random_y = tf.random.uniform(
batch_shape,
@@ -561,8 +561,7 @@ def random_patches(focal: tf.Tensor,
axis=0)
pixels_ij = tf.reshape(pixels_ij, final_shape)
patch_xy = tf.reverse(pixels_ij, axis=[-1])
rays = ray(patch_xy,
tf.expand_dims(focal, -2),
rays = ray(patch_xy, tf.expand_dims(focal, -2),
tf.expand_dims(principal_point, -2))
return rays, patch_xy

11 changes: 5 additions & 6 deletions tensorflow_graphics/rendering/kernels/rasterization_backend.py
@@ -45,7 +45,7 @@ def rasterize(vertices: tf.Tensor,
image_size: Tuple[int, int],
enable_cull_face: bool,
num_layers: int,
name=None):
name="rasterization_backend_cpu_rasterize"):
"""Rasterizes the scene.
This rasterizer estimates which triangle is associated with each pixel using
@@ -64,7 +64,7 @@ def rasterize(vertices: tf.Tensor,
and no face culling when False.
num_layers: Number of depth layers to render. Output tensors shape depends
on whether num_layers=1 or not.
name: A name for this op. Defaults to 'rasterization_backend_cpu_rasterize'.
name: A name for this op. Defaults to "rasterization_backend_cpu_rasterize".
Returns:
A Framebuffer containing the rasterized values: barycentrics, triangle_id,
@@ -79,10 +79,9 @@ def rasterize(vertices: tf.Tensor,
The barycentric coordinates can be used to determine pixel validity instead.
See framebuffer.py for a description of the Framebuffer fields.
"""
with tf.compat.v1.name_scope(name, "rasterization_backend_cpu_rasterize",
(vertices, triangles, view_projection_matrices)):
vertices = tf.convert_to_tensor(vertices)
triangles = tf.convert_to_tensor(triangles)
with tf.name_scope(name):
vertices = tf.convert_to_tensor(value=vertices)
triangles = tf.convert_to_tensor(value=triangles)
view_projection_matrices = tf.convert_to_tensor(
value=view_projection_matrices)

68 changes: 27 additions & 41 deletions tensorflow_graphics/rendering/opengl/math.py
@@ -27,7 +27,7 @@ def model_to_eye(point_model_space,
camera_position,
look_at_point,
up_vector,
name=None):
name="model_to_eye"):
"""Transforms points from model to eye coordinates.
Note:
@@ -43,7 +43,7 @@ def model_to_eye(point_model_space,
storing the position where the camera is looking at.
up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
defines the up vector of the camera.
name: A name for this op. Defaults to 'model_to_eye'.
name: A name for this op. Defaults to "model_to_eye".
Raises:
ValueError: if the all the inputs are not of the same shape, or if any input
@@ -53,9 +53,7 @@ def model_to_eye(point_model_space,
A tensor of shape `[A1, ..., An, 3]`, containing `point_model_space` in eye
coordinates.
"""
with tf.compat.v1.name_scope(
name, "model_to_eye",
[point_model_space, camera_position, look_at_point, up_vector]):
with tf.name_scope(name):
point_model_space = tf.convert_to_tensor(value=point_model_space)
camera_position = tf.convert_to_tensor(value=camera_position)
look_at_point = tf.convert_to_tensor(value=look_at_point)
@@ -88,7 +86,7 @@ def eye_to_clip(point_eye_space,
aspect_ratio,
near,
far,
name=None):
name="eye_to_clip"):
"""Transforms points from eye to clip space.
Note:
@@ -110,7 +108,7 @@ def eye_to_clip(point_eye_space,
far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension captures
the distance between the viewer and the far clipping plane. Note that
values for `far` must be non-negative.
name: A name for this op. Defaults to 'eye_to_clip'.
name: A name for this op. Defaults to "eye_to_clip".
Raises:
ValueError: If any input is of an unsupported shape.
@@ -119,9 +117,7 @@ def eye_to_clip(point_eye_space,
A tensor of shape `[A1, ..., An, 4]`, containing `point_eye_space` in
homogeneous clip coordinates.
"""
with tf.compat.v1.name_scope(
name, "eye_to_clip",
[point_eye_space, vertical_field_of_view, aspect_ratio, near, far]):
with tf.name_scope(name):
point_eye_space = tf.convert_to_tensor(value=point_eye_space)
vertical_field_of_view = tf.convert_to_tensor(value=vertical_field_of_view)
aspect_ratio = tf.convert_to_tensor(value=aspect_ratio)
@@ -160,7 +156,7 @@ def eye_to_clip(point_eye_space,
return tf.squeeze(tf.matmul(perspective_matrix, point_eye_space), axis=-1)


def clip_to_ndc(point_clip_space, name=None):
def clip_to_ndc(point_clip_space, name="clip_to_ndc"):
"""Transforms points from clip to normalized device coordinates (ndc).
Note:
@@ -169,7 +165,7 @@ def clip_to_ndc(point_clip_space, name=None):
Args:
point_clip_space: A tensor of shape `[A1, ..., An, 4]`, where the last
dimension represents points in clip space.
name: A name for this op. Defaults to 'clip_to_ndc'.
name: A name for this op. Defaults to "clip_to_ndc".
Raises:
ValueError: If `point_clip_space` is not of size 4 in its last dimension.
@@ -178,7 +174,7 @@ def clip_to_ndc(point_clip_space, name=None):
A tensor of shape `[A1, ..., An, 3]`, containing `point_clip_space` in
normalized device coordinates.
"""
with tf.compat.v1.name_scope(name, "clip_to_ndc", [point_clip_space]):
with tf.name_scope(name):
point_clip_space = tf.convert_to_tensor(value=point_clip_space)

shape.check_static(
@@ -195,7 +191,7 @@ def ndc_to_screen(point_ndc_space,
screen_dimensions,
near,
far,
name=None):
name="ndc_to_screen"):
"""Transforms points from normalized device coordinates to screen coordinates.
Note:
@@ -217,7 +213,7 @@ def ndc_to_screen(point_ndc_space,
far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension
captures the distance between the viewer and the far clipping plane. Note
that values for `far` must be greater than those of `near`.
name: A name for this op. Defaults to 'ndc_to_screen'.
name: A name for this op. Defaults to "ndc_to_screen".
Raises:
InvalidArgumentError: if any input contains data not in the specified range
@@ -228,9 +224,7 @@ def ndc_to_screen(point_ndc_space,
A tensor of shape `[A1, ..., An, 3]`, containing `point_ndc_space` in
screen coordinates.
"""
with tf.compat.v1.name_scope(
name, "ndc_to_screen",
[point_ndc_space, lower_left_corner, screen_dimensions, near, far]):
with tf.name_scope(name):
point_ndc_space = tf.convert_to_tensor(value=point_ndc_space)
lower_left_corner = tf.convert_to_tensor(value=lower_left_corner)
screen_dimensions = tf.convert_to_tensor(value=screen_dimensions)
@@ -281,7 +275,7 @@ def model_to_screen(point_model_space,
perspective_matrix,
screen_dimensions,
lower_left_corner=(0.0, 0.0),
name=None):
name="model_to_screen"):
"""Transforms points from model to screen coordinates.
Note:
@@ -307,7 +301,7 @@ def model_to_screen(point_model_space,
lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last
dimension captures the position (in pixels) of the lower left corner of
the screen.
name: A name for this op. Defaults to 'model_to_screen'.
name: A name for this op. Defaults to "model_to_screen".
Raises:
InvalidArgumentError: if any input contains data not in the specified range
@@ -320,10 +314,7 @@ def model_to_screen(point_model_space,
`point_model_space` in screen coordinates, and the second represents the 'w'
component of `point_model_space` in clip space.
"""
with tf.compat.v1.name_scope(name, "model_to_screen", [
point_model_space, model_to_eye_matrix, perspective_matrix,
screen_dimensions, lower_left_corner
]):
with tf.name_scope(name):
point_model_space = tf.convert_to_tensor(value=point_model_space)
model_to_eye_matrix = tf.convert_to_tensor(value=model_to_eye_matrix)
perspective_matrix = tf.convert_to_tensor(value=perspective_matrix)
@@ -374,7 +365,7 @@ def perspective_correct_barycentrics(triangle_vertices_model_space,
perspective_matrix,
screen_dimensions,
lower_left_corner=(0.0, 0.0),
name=None):
name="perspective_correct_barycentrics"):
"""Computes perspective correct barycentrics.
Note:
@@ -399,7 +390,7 @@ def perspective_correct_barycentrics(triangle_vertices_model_space,
lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last
dimension captures the position (in pixels) of the lower left corner of
the screen.
name: A name for this op. Defaults to 'perspective_correct_barycentrics'.
name: A name for this op. Defaults to "perspective_correct_barycentrics".
Raises:
InvalidArgumentError: if any input contains data not in the specified range
@@ -410,10 +401,7 @@ def perspective_correct_barycentrics(triangle_vertices_model_space,
A tensor of shape `[A1, ..., An, 3]`, containing perspective correct
barycentric coordinates.
"""
with tf.compat.v1.name_scope(name, "perspective_correct_barycentrics", [
triangle_vertices_model_space, pixel_position, model_to_eye_matrix,
perspective_matrix, screen_dimensions, lower_left_corner
]):
with tf.name_scope(name):
pixel_position = tf.convert_to_tensor(value=pixel_position)
triangle_vertices_model_space = tf.convert_to_tensor(
value=triangle_vertices_model_space)
@@ -453,7 +441,9 @@ def perspective_correct_barycentrics(triangle_vertices_model_space,
return tf.linalg.normalize(coeffs, ord=1, axis=-1)[0]


def interpolate_attributes(attribute, barycentric, name=None):
def interpolate_attributes(attribute,
barycentric,
name="interpolate_attributes"):
"""Interpolates attributes using barycentric weights.
Note:
@@ -464,13 +454,12 @@ def interpolate_attributes(attribute, barycentric, name=None):
stores a per-vertex `B`-dimensional attribute.
barycentric: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
contains barycentric coordinates.
name: A name for this op. Defaults to 'interpolate_attributes'.
name: A name for this op. Defaults to "interpolate_attributes".
Returns:
A tensor of shape `[A1, ..., An, B]`, containing interpolated attributes.
"""
with tf.compat.v1.name_scope(name, "interpolate_attributes",
(attribute, barycentric)):
with tf.name_scope(name):
attribute = tf.convert_to_tensor(value=attribute)
barycentric = tf.convert_to_tensor(value=barycentric)

@@ -495,7 +484,7 @@ def perspective_correct_interpolation(triangle_vertices_model_space,
perspective_matrix,
screen_dimensions,
lower_left_corner=(0.0, 0.0),
name=None):
name="perspective_correct_interpolation"):
"""Returns perspective corrected interpolation of attributes over triangles.
Note:
@@ -522,7 +511,7 @@ def perspective_correct_interpolation(triangle_vertices_model_space,
lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last
dimension captures the position (in pixels) of the lower left corner of
the screen.
name: A name for this op. Defaults to 'perspective_correct_interpolation'.
name: A name for this op. Defaults to "perspective_correct_interpolation".
Raises:
tf.errors.InvalidArgumentError: if any input contains data not in the
@@ -532,15 +521,12 @@ def perspective_correct_interpolation(triangle_vertices_model_space,
Returns:
A tensor of shape `[A1, ..., An, B]`, containing interpolated attributes.
"""
with tf.compat.v1.name_scope(name, "perspective_correct_interpolation", [
triangle_vertices_model_space, attribute, pixel_position,
model_to_eye_matrix, perspective_matrix, screen_dimensions,
lower_left_corner
]):
with tf.name_scope(name):
barycentric = perspective_correct_barycentrics(
triangle_vertices_model_space, pixel_position, model_to_eye_matrix,
perspective_matrix, screen_dimensions, lower_left_corner)
return interpolate_attributes(attribute, barycentric)


# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
10 changes: 4 additions & 6 deletions tensorflow_graphics/rendering/opengl/rasterization_backend.py
@@ -30,7 +30,7 @@


def _dim_value(dim):
return 1 if dim is None else tf.compat.v1.dimension_value(dim)
return 1 if dim is None else tf.compat.dimension_value(dim)


# Empty vertex shader; all the work happens in the geometry shader.
@@ -104,7 +104,7 @@ def rasterize(vertices,
image_size,
enable_cull_face,
num_layers,
name=None):
name="rasterization_backend_rasterize"):
"""Rasterizes the scene.
This rasterizer estimates which triangle is associated with each pixel using
@@ -127,7 +127,7 @@ def rasterize(vertices,
and no face culling when False. Default is True.
num_layers: Number of depth layers to render. Not supported by current
backend yet, but exists for interface compatibility.
name: A name for this op. Defaults to 'rasterization_backend_rasterize'.
name: A name for this op. Defaults to "rasterization_backend_rasterize".
Returns:
A Framebuffer containing the rasterized values: barycentrics, triangle_id,
@@ -142,9 +142,7 @@ def rasterize(vertices,
The barycentric coordinates can be used to determine pixel validity instead.
See framebuffer.py for a description of the Framebuffer fields.
"""
with tf.compat.v1.name_scope(name, "rasterization_backend_rasterize",
(vertices, triangles, view_projection_matrices)):

with tf.name_scope(name):
if num_layers != 1:
raise ValueError("OpenGL rasterizer only supports single layer.")

@@ -151,7 +151,7 @@ def test_rasterize_preset(self):
with self.subTest(name="barycentric_coordinates_triangle_0"):
geometry_0 = tf.gather(vertices, triangles[0, :], axis=1)
pixels_0 = tf.transpose(
grid.generate((3.5, 2.5), (6.5, 4.5), (4, 3)), perm=(1, 0, 2))
a=grid.generate((3.5, 2.5), (6.5, 4.5), (4, 3)), perm=(1, 0, 2))
barycentrics_gt_0 = perspective_correct_interpolation(
geometry_0, pixels_0)
self.assertAllClose(
@@ -162,7 +162,7 @@ def test_rasterize_preset(self):
with self.subTest(name="barycentric_coordinates_triangle_1"):
geometry_1 = tf.gather(vertices, triangles[1, :], axis=1)
pixels_1 = tf.transpose(
grid.generate((3.5, 0.5), (6.5, 1.5), (4, 2)), perm=(1, 0, 2))
a=grid.generate((3.5, 0.5), (6.5, 1.5), (4, 2)), perm=(1, 0, 2))
barycentrics_gt_1 = perspective_correct_interpolation(
geometry_1, pixels_1)
self.assertAllClose(
