Skip to content

Commit

Permalink
add from_ndc to unproject_points
Browse files Browse the repository at this point in the history
Summary: Give unproject_points an argument letting it bypass screen space. Use it to let the raysampler work for cameras defined in screen space.

Reviewed By: gkioxari

Differential Revision: D32596600

fbshipit-source-id: 2fe585dcd138cdbc65dd1c70e1957fd894512d3d
  • Loading branch information
bottler authored and facebook-github-bot committed Dec 7, 2021
1 parent a0e2d2e commit cff4876
Show file tree
Hide file tree
Showing 2 changed files with 39 additions and 3 deletions.
40 changes: 38 additions & 2 deletions pytorch3d/renderer/cameras.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,10 @@ def unproject_points(self):
coordinates using the camera extrinsics `R` and `T`.
`False` ignores `R` and `T` and unprojects to
the camera view coordinates.
from_ndc: If `False` (default), assumes xy part of input is in
NDC space if self.in_ndc(), otherwise in screen space. If
`True`, assumes xy is in NDC space even if the camera
is defined in screen space.
Returns:
new_points: unprojected points with the same shape as `xy_depth`.
Expand Down Expand Up @@ -998,12 +1002,27 @@ def get_projection_transform(self, **kwargs) -> Transform3d:
return transform

def unproject_points(
self, xy_depth: torch.Tensor, world_coordinates: bool = True, **kwargs
self,
xy_depth: torch.Tensor,
world_coordinates: bool = True,
from_ndc: bool = False,
**kwargs
) -> torch.Tensor:
"""
Args:
from_ndc: If `False` (default), assumes xy part of input is in
NDC space if self.in_ndc(), otherwise in screen space. If
`True`, assumes xy is in NDC space even if the camera
is defined in screen space.
"""
if world_coordinates:
to_camera_transform = self.get_full_projection_transform(**kwargs)
else:
to_camera_transform = self.get_projection_transform(**kwargs)
if from_ndc:
to_camera_transform = to_camera_transform.compose(
self.get_ndc_camera_transform()
)

unprojection_transform = to_camera_transform.inverse()
xy_inv_depth = torch.cat(
Expand All @@ -1030,6 +1049,8 @@ def get_ndc_camera_transform(self, **kwargs) -> Transform3d:
For cameras defined in screen space, we adjust the principal point computation
which is defined in the image space (commonly) and scale the points to NDC space.
This transform leaves the depth unchanged.
Important: This transform assumes PyTorch3D conventions for the input points,
i.e. +X left, +Y up.
"""
Expand Down Expand Up @@ -1199,12 +1220,27 @@ def get_projection_transform(self, **kwargs) -> Transform3d:
return transform

def unproject_points(
    self,
    xy_depth: torch.Tensor,
    world_coordinates: bool = True,
    from_ndc: bool = False,
    **kwargs
) -> torch.Tensor:
    """
    Transform input points back to world or camera view coordinates by
    inverting the camera's projection transform.

    Args:
        xy_depth: points to unproject; the last dimension holds the
            (x, y, depth) values, with the same shape returned.
        world_coordinates: If `True` (default), unprojects the points back
            to world coordinates using the camera extrinsics `R` and `T`.
            `False` ignores `R` and `T` and unprojects to the camera view
            coordinates.
        from_ndc: If `False` (default), assumes xy part of input is in
            NDC space if self.in_ndc(), otherwise in screen space. If
            `True`, assumes xy is in NDC space even if the camera
            is defined in screen space.

    Returns:
        new_points: unprojected points with the same shape as `xy_depth`.
    """
    if world_coordinates:
        to_camera_transform = self.get_full_projection_transform(**kwargs)
    else:
        to_camera_transform = self.get_projection_transform(**kwargs)
    if from_ndc:
        # For a camera defined in screen space, append the screen -> NDC
        # transform so that inverting the composite maps NDC-space inputs
        # back to camera/world coordinates.
        to_camera_transform = to_camera_transform.compose(
            self.get_ndc_camera_transform()
        )

    unprojection_transform = to_camera_transform.inverse()
    return unprojection_transform.transform_points(xy_depth)
Expand Down
2 changes: 1 addition & 1 deletion pytorch3d/renderer/implicit/raysampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,7 @@ def _xy_to_ray_bundle(
)

# unproject the points
unprojected = cameras.unproject_points(to_unproject) # pyre-ignore
unprojected = cameras.unproject_points(to_unproject, from_ndc=True) # pyre-ignore

# split the two planes back
rays_plane_1_world = unprojected[:, :n_rays_per_image]
Expand Down

0 comments on commit cff4876

Please sign in to comment.