diff --git a/projects/nerf/README.md b/projects/nerf/README.md
index e941903db..a103aab47 100644
--- a/projects/nerf/README.md
+++ b/projects/nerf/README.md
@@ -9,7 +9,6 @@ This project implements the Neural Radiance Fields (NeRF) from [1].
 Installation
 ------------
 1) [Install PyTorch3D](https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md)
-    - Note that this repo requires `PyTorch` version `>= v1.6.0` due to dependency on `torch.searchsorted`.
 2) Install other dependencies:
     - [`visdom`](https://github.com/facebookresearch/visdom)
diff --git a/projects/nerf/nerf/raysampler.py b/projects/nerf/nerf/raysampler.py
index 366e71e90..e061380cf 100644
--- a/projects/nerf/nerf/raysampler.py
+++ b/projects/nerf/nerf/raysampler.py
@@ -10,8 +10,7 @@
 import torch
 from pytorch3d.renderer import MonteCarloRaysampler, NDCGridRaysampler, RayBundle
 from pytorch3d.renderer.cameras import CamerasBase
-
-from .utils import sample_pdf
+from pytorch3d.renderer.implicit.sample_pdf import sample_pdf
 
 
 class ProbabilisticRaysampler(torch.nn.Module):
diff --git a/projects/nerf/nerf/utils.py b/projects/nerf/nerf/utils.py
index a50bcf7e7..01d065a58 100644
--- a/projects/nerf/nerf/utils.py
+++ b/projects/nerf/nerf/utils.py
@@ -7,71 +7,6 @@
 import torch
 
 
-def sample_pdf(
-    bins: torch.Tensor,
-    weights: torch.Tensor,
-    N_samples: int,
-    det: bool = False,
-    eps: float = 1e-5,
-):
-    """
-    Samples a probability density functions defined by bin edges `bins` and
-    the non-negative per-bin probabilities `weights`.
-
-    Note: This is a direct conversion of the TensorFlow function from the original
-    release [1] to PyTorch.
-
-    Args:
-        bins: Tensor of shape `(..., n_bins+1)` denoting the edges of the sampling bins.
-        weights: Tensor of shape `(..., n_bins)` containing non-negative numbers
-            representing the probability of sampling the corresponding bin.
-        N_samples: The number of samples to draw from each set of bins.
-        det: If `False`, the sampling is random. `True` yields deterministic
-            uniformly-spaced sampling from the inverse cumulative density function.
-        eps: A constant preventing division by zero in case empty bins are present.
-
-    Returns:
-        samples: Tensor of shape `(..., N_samples)` containing `N_samples` samples
-            drawn from each set probability distribution.
-
-    Refs:
-        [1] https://github.com/bmild/nerf/blob/55d8b00244d7b5178f4d003526ab6667683c9da9/run_nerf_helpers.py#L183  # noqa E501
-    """
-
-    # Get pdf
-    weights = weights + eps  # prevent nans
-    pdf = weights / weights.sum(dim=-1, keepdim=True)
-    cdf = torch.cumsum(pdf, -1)
-    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)
-
-    # Take uniform samples
-    if det:
-        u = torch.linspace(0.0, 1.0, N_samples, device=cdf.device, dtype=cdf.dtype)
-        u = u.expand(list(cdf.shape[:-1]) + [N_samples]).contiguous()
-    else:
-        u = torch.rand(
-            list(cdf.shape[:-1]) + [N_samples], device=cdf.device, dtype=cdf.dtype
-        )
-
-    # Invert CDF
-    inds = torch.searchsorted(cdf, u, right=True)
-    below = (inds - 1).clamp(0)
-    above = inds.clamp(max=cdf.shape[-1] - 1)
-    inds_g = torch.stack([below, above], -1).view(
-        *below.shape[:-1], below.shape[-1] * 2
-    )
-
-    cdf_g = torch.gather(cdf, -1, inds_g).view(*below.shape, 2)
-    bins_g = torch.gather(bins, -1, inds_g).view(*below.shape, 2)
-
-    denom = cdf_g[..., 1] - cdf_g[..., 0]
-    denom = torch.where(denom < eps, torch.ones_like(denom), denom)
-    t = (u - cdf_g[..., 0]) / denom
-    samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
-
-    return samples
-
-
 def calc_mse(x: torch.Tensor, y: torch.Tensor):
     """
     Calculates the mean square error between tensors `x` and `y`.
diff --git a/pytorch3d/renderer/implicit/sample_pdf.py b/pytorch3d/renderer/implicit/sample_pdf.py
index d986b6829..48dd16634 100644
--- a/pytorch3d/renderer/implicit/sample_pdf.py
+++ b/pytorch3d/renderer/implicit/sample_pdf.py
@@ -75,14 +75,15 @@ def sample_pdf_python(
     This is a pure python implementation of the `sample_pdf` function.
     It may be faster than sample_pdf when the number of bins is very large,
     because it behaves as O(batchsize * [n_bins + log(n_bins) * n_samples] )
-    whereas sample_pdf behaves as O(batchsize * n_bins * n_samples).
+    whereas sample_pdf behaves as O(batchsize * n_bins * n_samples). For 64 bins sample_pdf is much faster.
 
     Samples probability density functions defined by bin edges `bins` and
     the non-negative per-bin probabilities `weights`.
 
     Note: This is a direct conversion of the TensorFlow function from the original
-    release [1] to PyTorch.
+    release [1] to PyTorch. It requires PyTorch 1.6 or greater due to the use of
+    torch.searchsorted.
 
     Args:
         bins: Tensor of shape `(..., n_bins+1)` denoting the edges of the sampling bins.
diff --git a/tests/test_sample_pdf.py b/tests/test_sample_pdf.py
index 4d1cf9ac5..c73f0e3df 100644
--- a/tests/test_sample_pdf.py
+++ b/tests/test_sample_pdf.py
@@ -12,6 +12,7 @@
 from pytorch3d.renderer.implicit.sample_pdf import sample_pdf, sample_pdf_python
 
 
+@unittest.skipIf(torch.__version__[:4] == "1.5.", "searchsorted needs PyTorch 1.6")
 class TestSamplePDF(TestCaseMixin, unittest.TestCase):
     def setUp(self) -> None:
         super().setUp()