From ccdfe0c25aa651e361bd16229f1e6a89eb61637f Mon Sep 17 00:00:00 2001 From: Francis Williams Date: Tue, 21 May 2024 10:26:08 -0400 Subject: [PATCH] pull in changes from internal repo --- fvdb/.github/workflows/benchmarks.yml | 10 +- fvdb/.github/workflows/building.yml | 68 +++++++------ fvdb/.github/workflows/tests.yml | 8 +- fvdb/Dockerfile | 2 +- fvdb/LICENSE | 13 +++ fvdb/README.md | 139 ++++++++++++++++++-------- fvdb/docs/tutorials/installation.md | 12 +-- fvdb/env/build_environment.yml | 2 +- fvdb/env/build_requirements.txt | 4 +- fvdb/env/learn_environment.yml | 44 ++++++++ fvdb/env/release_base_environment.yml | 32 ++++++ fvdb/env/test_environment.yml | 4 +- fvdb/examples/docker_build.sh | 2 +- fvdb/fvdb/__init__.py | 4 +- fvdb/setup.py | 36 +++++-- fvdb/src/detail/TypesImpl.h | 2 +- 16 files changed, 288 insertions(+), 94 deletions(-) create mode 100644 fvdb/LICENSE create mode 100644 fvdb/env/learn_environment.yml create mode 100644 fvdb/env/release_base_environment.yml diff --git a/fvdb/.github/workflows/benchmarks.yml b/fvdb/.github/workflows/benchmarks.yml index dbfa5fbd1f..673ccfd5e6 100644 --- a/fvdb/.github/workflows/benchmarks.yml +++ b/fvdb/.github/workflows/benchmarks.yml @@ -31,7 +31,7 @@ jobs: - name: Run benchmarks run: | conda activate fvdb_test - MAX_JOBS=$(free -g | awk '/^Mem:/{jobs=int($4/2.5); if(jobs<1) jobs=1; print jobs}') python setup.py develop + MAX_JOBS=$(($(nproc) < $(free -g | awk '/^Mem:/{jobs=int($4/2.5); if(jobs<1) jobs=1; print jobs}') ? $(nproc) : $(free -g | awk '/^Mem:/{jobs=int($4/2.5); if(jobs<1) jobs=1; print jobs}'))) python setup.py develop pytest tests/benchmark --benchmark-json tests/benchmark/output.json shell: bash -el {0} @@ -48,4 +48,10 @@ jobs: alert-threshold: '200%' comment-on-alert: true fail-on-alert: true - alert-comment-cc-users: '@swahtz' \ No newline at end of file + alert-comment-cc-users: '@swahtz' + + - name: Clean Conda + run: | + conda clean -pty + shell: + bash -el {0} \ No newline at end of file diff --git a/fvdb/.github/workflows/building.yml b/fvdb/.github/workflows/building.yml index 007cbe6d14..5bcdd3c456 100644 --- a/fvdb/.github/workflows/building.yml +++ b/fvdb/.github/workflows/building.yml @@ -10,61 +10,67 @@ jobs: wheel: runs-on: - self-hosted - - ${{ matrix.cuda-version }} strategy: fail-fast: false matrix: # support version based on: https://download.pytorch.org/whl/torch/ - python-version: ['3.10', '3.11'] - torch-version: [2.1.0, 2.1.1, 2.1.2] - cuda-version: ['cu121'] - # exclude: - # - torch-version: 2.0.1 - # python-version: '3.10' - # - torch-version: 2.0.0 - # python-version: '3.11' + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + torch-version: [2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1, 2.2.2, 2.3.0] + cuda-version: ['12.1.1'] + exclude: + - torch-version: 2.1.2 + python-version: '3.12' + - torch-version: 2.1.1 + python-version: '3.12' + - torch-version: 2.1.0 + python-version: '3.12' steps: - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - - - name: Install PyTorch ${{ matrix.torch-version }}+${{ matrix.cuda-version }} + - name: Generate release_environment.yml run: | - pip install torch==${{ matrix.torch-version }} --extra-index-url https://download.pytorch.org/whl/${{ matrix.cuda-version }} - python -c "import torch; print('PyTorch:', torch.__version__)" - python -c "import torch; print('CUDA:', torch.version.cuda)" + echo "cp env/release_base_environment.yml 
release_environment.yml" + cp env/release_base_environment.yml release_environment.yml + echo " - python=${{ matrix.python-version }}" >> release_environment.yml + echo " - pytorch=${{ matrix.torch-version }}" >> release_environment.yml + perl -E '"${{ matrix.cuda-version }}" =~ /^(\d+\.\d+)/; say " - pytorch-cuda=$1"' >> release_environment.yml + perl -E '"${{ matrix.cuda-version }}" =~ /^(\d+)\.(\d+)/; say " - cuda>=$1.$2.0, <$1." . ($2+1) . ".0"' >> release_environment.yml + perl -E '"${{ matrix.cuda-version }}" =~ /^(\d+)\.(\d+)/; say " - cuda-nvcc>=$1.$2.0, <$1." . ($2+1) . ".0"' >> release_environment.yml + + - name: Set up fvdb_release Conda env + uses: conda-incubator/setup-miniconda@v3 + with: + miniconda-version: "latest" + mamba-version: "*" + channels: nvidia,pytorch,conda-forge,defaults + channel-priority: true + activate-environment: fvdb_release + environment-file: release_environment.yml - name: Set version run: | VERSION=`sed -n "s/^__version__ = '\(.*\)'/\1/p" fvdb/__init__.py` TORCH_VERSION=`echo "pt${{ matrix.torch-version }}" | sed "s/..$//" | sed "s/\.//g"` - CUDA_VERSION=`echo ${{ matrix.cuda-version }}` + CUDA_VERSION=`perl -E '"${{ matrix.cuda-version }}" =~ /^(\d+)\.(\d+)/; say "cu$1$2"'` echo "New version name: $VERSION+$TORCH_VERSION$CUDA_VERSION" sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" fvdb/__init__.py shell: - bash - - - name: Install dependencies - run: | - pip install -r env/build_requirements.txt + bash -el {0} - name: Build wheel run: | + conda activate fvdb_release mkdir dist TORCH_CUDA_ARCH_LIST="6.1;7.0;7.5;8.0;8.6+PTX" python setup.py bdist_wheel --dist-dir=dist shell: - bash + bash -el {0} - name: Rename wheel run: | - python scripts/rename_wheels.py ${{ matrix.torch-version }} ${{ matrix.cuda-version }} + python scripts/rename_wheels.py ${{ matrix.torch-version }} `perl -E '"${{ matrix.cuda-version }}" =~ /^(\d+)\.(\d+)/; say "cu$1$2"'` shell: - bash + bash -el {0} # - name: Push wheel to fvdb-wheels # id: push_directory @@ -111,3 +117,9 @@ jobs: # - name: Upload wheel # run: | # aws s3 sync dist s3://nksr/whl/torch-${{ matrix.torch-version }}+${{ matrix.cuda-version }} --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers + + - name: Clean Conda + run: | + conda clean -pty + shell: + bash -el {0} \ No newline at end of file diff --git a/fvdb/.github/workflows/tests.yml b/fvdb/.github/workflows/tests.yml index 04457a2326..b3505965f5 100644 --- a/fvdb/.github/workflows/tests.yml +++ b/fvdb/.github/workflows/tests.yml @@ -28,6 +28,12 @@ jobs: - name: Run tests run: | conda activate fvdb_test - MAX_JOBS=$(free -g | awk '/^Mem:/{jobs=int($4/2.5); if(jobs<1) jobs=1; print jobs}') python setup.py test + MAX_JOBS=$(($(nproc) < $(free -g | awk '/^Mem:/{jobs=int($4/2.5); if(jobs<1) jobs=1; print jobs}') ? $(nproc) : $(free -g | awk '/^Mem:/{jobs=int($4/2.5); if(jobs<1) jobs=1; print jobs}'))) python setup.py test + shell: + bash -el {0} + + - name: Clean Conda + run: | + conda clean -pty shell: bash -el {0} \ No newline at end of file diff --git a/fvdb/Dockerfile b/fvdb/Dockerfile index 842504cd84..4b3aa750dc 100644 --- a/fvdb/Dockerfile +++ b/fvdb/Dockerfile @@ -44,7 +44,7 @@ ENV TORCH_CUDA_ARCH_LIST "6.1;7.0;7.5;8.0;8.6+PTX" # used for cross-compilation in docker build ENV FORCE_CUDA=1 -WORKDIR /feature-vdb +WORKDIR /fvdb COPY env/test_environment.yml . 
RUN /opt/conda/bin/conda env create -f test_environment.yml \ diff --git a/fvdb/LICENSE b/fvdb/LICENSE new file mode 100644 index 0000000000..e693e159d2 --- /dev/null +++ b/fvdb/LICENSE @@ -0,0 +1,13 @@ +Copyright 2023 NVIDIA Corporation + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Join Us diff --git a/fvdb/README.md b/fvdb/README.md index 2ff10a2e9f..a1761d3c00 100644 --- a/fvdb/README.md +++ b/fvdb/README.md @@ -1,9 +1,7 @@ -# *f*(VDB) +# *ƒ*(VDB) -#### The *f*VDB API is in alpha. If you depend on it for your project, expect it to change under you! - -This repository contains the code for *f*VDB, a data structure for encoding and operating on *Sparse voxel hierarchies* of features in PyTorch. A sparse voxel hierarchy is a coarse-to-fine hierarchy of sparse voxel grids such that every fine voxel is contained within some coarse voxel. The image below illustrates an example. *f*VDB supports storing PyTorch tensors at the corners and centers of voxels in a hierarchy and enables a number of differentiable operations on these tensors (*e.g.* trilinear interpolation, splatting, ray tracing). +This repository contains the code for *f*VDB, a data structure for encoding and operating on *sparse voxel hierarchies* of features in PyTorch. A sparse voxel hierarchy is a coarse-to-fine hierarchy of sparse voxel grids such that every fine voxel is contained within some coarse voxel. The image below illustrates an example. *f*VDB supports using PyTorch Tensors to represent features at the corners and centers of voxels in a hierarchy and enables a number of differentiable operations on these Tensors (*e.g.* trilinear interpolation, convolution, splatting, ray tracing).

[Image: fVDB Teaser]
@@ -11,8 +9,43 @@ This repository contains the code for *f*VDB, and

An example of a sparse voxel hierarchy with 3 levels. Each fine voxel is contained within exactly one coarse voxel.
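To make the core idea concrete before diving in, here is a tiny, self-contained PyTorch sketch of what a single level of such a sparse feature grid stores: a set of active `(i, j, k)` voxels mapped to rows of a flat feature tensor. This is illustrative only and does not use the *f*VDB API (fVDB maintains this mapping with NanoVDB index grids on the GPU):

```python
import torch

# Illustrative only: a Python dict stands in for the sparse index structure.
ijk = torch.tensor([[0, 0, 0], [0, 1, 0], [3, 2, 1]])        # active voxels
offsets = {tuple(c.tolist()): row for row, c in enumerate(ijk)}
features = torch.randn(len(offsets), 8, requires_grad=True)  # one feature row per voxel

def lookup(i, j, k):
    """Return the feature stored at voxel (i, j, k), or None if the voxel is inactive."""
    row = offsets.get((i, j, k))
    return features[row] if row is not None else None

print(lookup(0, 1, 0).shape)  # torch.Size([8])
print(lookup(9, 9, 9))        # None -- the voxel is not in the sparse set
```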

+## Learning to Use *f*VDB
+
+After [installing *f*VDB](#installing-fvdb), we recommend starting with our walk-through [notebooks](notebooks), which provide a gentle, illustrated introduction to the main concepts and operations in *f*VDB.
+
+Once familiar with the basics, [Usage Examples](#usage-examples) introduces a few of the useful Python scripts that can be explored in the [examples](examples) directory.
+
+Our [documentation](docs) provides a more detailed explanation of the concepts and operations available in *f*VDB as well as an API reference. The documentation can be built locally by following the instructions in the [Building Documentation](#building-documentation) section or can be accessed online at [TODO: insert link to online documentation].
+
+## Installing *f*VDB
+
+*f*VDB is provided as an installable Python package from *[todo: insert package distributor]*. We provide pre-built packages of the latest *f*VDB version for the following dependent library configurations:
+
+| PyTorch     | Python     | CUDA    |
+| ----------- | ---------- | ------- |
+| 2.0.0-2.0.3 | 3.8 - 3.11 | `cu121` |
+| 2.1.0-2.1.3 | 3.8 - 3.12 | `cu121` |
+| 2.3.0       | 3.8 - 3.12 | `cu121` |
+
+**Note:** Linux is the only platform currently supported (Ubuntu >= 20.04 recommended).
+
-## Building *f*VDB
+Use the following command to install `fvdb` into your environment.
+
+```bash
+TODO: Insert package install command
+```
+
+If you intend to use our learning material such as the [notebooks](notebooks) or [examples](examples), we recommend you start from the `fvdb_learn` conda environment, which contains all the dependencies needed to run the learning material as well as to build *f*VDB from source. To create this environment, run the following commands from the root of this repository:
+
+```bash
+conda env create -f env/learn_environment.yml
+conda activate fvdb_learn
+```
+
+## Building *f*VDB from Source

*f*VDB is a Python library implemented as a C++ PyTorch extension.

**(Optional) Install libMamba for a huge quality of life improvement when using Conda**
@@ -22,23 +55,29 @@ conda install -n base conda-libmamba-solver
conda config --set solver libmamba
```

-**Conda Environment.** Next, create the `fvdb` conda environment by running the following command from the root of this repository, and then grabbing a ☕:
+### Conda Environment
+
+Next, create the `fvdb_test` conda environment by running the following command from the root of this repository, and then grabbing a ☕:
+
```shell
conda env create -f env/test_environment.yml
```

-Note: You can optionally use the `env/build_environment.yml` environment file if you want a minimum set of dependencies needed to build *f*VDB and don't intend to run the tests.
+**Note:** You can optionally use the `env/build_environment.yml` environment file if you want a minimum set of dependencies needed to build *f*VDB and don't intend to run the tests, or the `env/learn_environment.yml` file if you would like the additional packages needed to run the examples and view their visualizations.

Now activate the environment:
```shell
-conda activate fvdb
+conda activate fvdb_test
```

-PyTorch cannot find the conda `libcudart.so` when JIT compiling extensions, so create the following symlink:
-```shell
-ln -s ${CONDA_PREFIX}/lib ${CONDA_PREFIX}/lib64
+
+### Building *f*VDB
+
+**:warning: Note:** Compilation can be very memory-consuming.
We recommend setting the `MAX_JOBS` environment variable to control compilation parallelism, with a value that allows one job per 2.5 GB of free memory:
+
+```bash
+export MAX_JOBS=$(free -g | awk '/^Mem:/{jobs=int($4/2.5); if(jobs<1) jobs=1; print jobs}')
```
-**Building *f*VDB.** You could either do an editable install with setuptools:
+
+You can either do an editable install with setuptools:
```shell
python setup.py develop
@@ -47,59 +86,52 @@ or directly install it to your site package folder if you are developing extensi
```shell
pip install .
```
-In both of the above cases, you should run from the root of the repository.
-Note: Compilation can be very memory-consuming. Please add environment variable `MAX_JOBS=N` and set `N` to be a small value to reduce parallelism, so your compilation doesn't get killed due to OOM.

-**Running Tests.** To make sure that everything works by running tests:
+
+### Running Tests
+
+To make sure that everything works, run the tests:
```shell
python setup.py test
```

-**Building Docs.** To build the documentation, simply run:
+### Building Documentation
+
+To build the documentation, simply run:
```shell
-# (Sphinx-7.0.0 works)
python setup.py build_ext --inplace
sphinx-build -E -a docs/ build/sphinx
# View the docs
open build/sphinx/index.html
```

-**Docker Image.** To build and test feature-vdb, we have the dockerfile available:
+### Docker Image
+
+To build and test *f*VDB, we provide a Dockerfile:
```shell
-# Build feature-vdb
+# Build fvdb
docker build . -t fvdb-dev

-# Run feature-vdb (or replace with your command)
+# Run fvdb (or replace with your command)
docker run -it --gpus all --rm \
  --user $(id -u):$(id -g) \
  --mount type=bind,source="$HOME/.ssh",target=/root/.ssh \
-  --mount type=bind,source="$(pwd)",target=/feature-vdb \
+  --mount type=bind,source="$(pwd)",target=/fvdb \
 fvdb-dev:latest \
 conda run -n fvdb_test --no-capture-output python setup.py test

-## Code Structure
-The main source code for fVDB lives in the [src](src) directory. There are several important files here:
-* `src/PythonBindings.cpp` exposes functionality directly to Python. It is mainly a wrapper around the `SparseFeatureIndexGrid` class.
-* `src/SparseFeatureIndexGrid.h` contains the implementation of `SparseFeatureIndexGrid` which is the core data structure on which fVDB is built. A `SparseFeatureIndexGrid` acts as a map between `(i, j, k)` integer coordinates and offsets in linear memory. This mapping can be used to perform a host of operations. The methods in this class are mostly lightweight wrappers around a set of CPU and CUDA *kernels*. The function prototypes for these kernels are defined in `src/Ops.h`.
-* `src/Ops.h` contains the function prototypes for the main kernels used by fVDB. These are only prototypes since there are both CPU kernels (implemented in `src/ops/cpu`) and CUDA kernels (implemented in `src/ops/cuda`)
-  * `src/ops/cpu/` contains CPU only implementations of the main kernels used by fVDB.
-  * `src/ops/cuda` contains CUDA implementations of the main kernels used by fVDB.
-* `src/autograd` contains C++ implementations of PyTorch autograd functions for differentiable operations. Including `autograd/Functions.h` includs all of the functions in this folder.
-* `src/utils` contains a number of utilities which make it easier to use NanoVDB.
-
-
## Usage Examples
-The [scripts](scripts) directory contains a number of examples of using the `fvdb` Python package. The sections below show some notable examples and their outputs. Run all commands from the root of the repository.
+The [examples](examples) directory contains a number of example scripts illustrating how to use the `fvdb` Python package. The sections below show some notable examples and their outputs. Run all commands from the root of the repository.

### Trilinear sampling of grids
```
-python scripts/debug_trilerp.py
+python examples/sample_trilinear.py
```
This script generates a grid with scalars at the corners of each voxel and samples this grid at points. The visualization below shows the points colored according to their sampled values as well as the values at grid corners.

[Image: fVDB trilinear interpolation demo]

Trilinearly interpolate the corner values at the points.
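For reference, the interpolation visualized above can be written in a few lines of plain PyTorch. This is an illustrative sketch of the math on a single unit voxel, not the `fvdb` API:

```python
import torch

def trilerp(corner_vals: torch.Tensor, p: torch.Tensor) -> torch.Tensor:
    """Trilinearly interpolate values stored at the 8 corners of a unit voxel.

    corner_vals: (2, 2, 2) tensor indexed by (i, j, k) corner
    p: (3,) query point in [0, 1]^3, in voxel-local coordinates
    """
    x, y, z = p
    wx = torch.stack([1 - x, x])  # linear weights along each axis
    wy = torch.stack([1 - y, y])
    wz = torch.stack([1 - z, z])
    # The outer product of the per-axis weights gives the 8 corner weights.
    w = wx[:, None, None] * wy[None, :, None] * wz[None, None, :]
    return (w * corner_vals).sum()

corners = torch.arange(8, dtype=torch.float32).reshape(2, 2, 2)
print(trilerp(corners, torch.tensor([0.5, 0.5, 0.5])))  # mean of all 8 corners: 3.5
```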
@@ -108,9 +140,9 @@ This script generates a grid with scalars at the corners of each voxel and sampl

### Trilinear splatting into grids
```
-python scripts/debug_splat.py
+python examples/splat_trilinear.py
```
-This script splats normals of a point cloud onto grid centers. The green arrows are the normals splatted onto each grid center
+This script splats normals of a point cloud onto grid centers. The green arrows represent the values of the normals splatted onto each grid center.

[Image: fVDB trilinear splatting demo]

Splat the normals at the blue points into the center of each grid cell. The green arrows are the splatted normals.
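Splatting is the transpose of trilinear sampling: each point distributes its value to the eight surrounding grid locations with the same weights that sampling would use. Below is an illustrative dense-grid PyTorch sketch of this operation using scalar values for brevity (fVDB performs it sparsely on the GPU):

```python
import torch

def splat(points, values, res):
    """Accumulate per-point scalar values into a res^3 grid with trilinear weights."""
    grid = torch.zeros(res, res, res)
    base = points.floor().long()  # lower-corner voxel (i, j, k) of each point
    frac = points - base          # fractional position inside that cell
    for dx in (0, 1):
        for dy in (0, 1):
            for dz in (0, 1):
                w = (frac[:, 0] if dx else 1 - frac[:, 0]) \
                    * (frac[:, 1] if dy else 1 - frac[:, 1]) \
                    * (frac[:, 2] if dz else 1 - frac[:, 2])
                idx = base + torch.tensor([dx, dy, dz])
                grid.index_put_((idx[:, 0], idx[:, 1], idx[:, 2]),
                                w * values, accumulate=True)
    return grid

pts = torch.rand(100, 3) * 6 + 0.5        # keep all 8 corners inside the 8^3 grid
out = splat(pts, torch.ones(100), 8)
print(out.sum())                          # the 8 weights per point sum to 1 -> ~100.0
```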
@@ -119,7 +151,7 @@ This script splats normals of a point cloud onto grid centers. The green arrows

### Tracing voxels along rays (hierarchical DDA)
```
-python scripts/debug_ray_voxel_marching.py
+python examples/ray_voxel_marching.py
```
This script demonstrates finding the first `N` voxels which lie along a ray (returning their index as well as their entry and exit points).
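The stepping logic underlying this example is a DDA traversal in the spirit of Amanatides & Woo: repeatedly advance into the neighboring voxel whose boundary the ray crosses first. Here is a minimal single-level, dense-grid sketch in PyTorch (illustrative only; fVDB's implementation is hierarchical and runs as CUDA kernels):

```python
import torch

def ray_voxels(origin, direction, n_voxels=8, voxel_size=1.0):
    """Enumerate the first n_voxels voxels hit by a ray (Amanatides-Woo DDA)."""
    o = torch.tensor(origin, dtype=torch.float64)
    d = torch.tensor(direction, dtype=torch.float64)
    ijk = torch.floor(o / voxel_size).long()  # voxel containing the ray origin
    step = torch.where(d >= 0, torch.ones_like(ijk), -torch.ones_like(ijk))
    # Ray parameter at the next boundary crossing along each axis, and the
    # parameter increment for crossing one whole voxel along each axis.
    next_bound = (ijk + (step > 0).long()) * voxel_size
    inf = torch.full_like(d, float("inf"))
    t_max = torch.where(d != 0, (next_bound - o) / d, inf)
    t_delta = torch.where(d != 0, voxel_size / d.abs(), inf)
    voxels = [tuple(ijk.tolist())]
    for _ in range(n_voxels - 1):
        axis = int(torch.argmin(t_max))       # nearest boundary crossing
        ijk[axis] += step[axis]
        t_max[axis] += t_delta[axis]
        voxels.append(tuple(ijk.tolist()))
    return voxels

print(ray_voxels([0.5, 0.5, 0.5], [1.0, 0.5, 0.0]))
```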

@@ -130,7 +162,7 @@ This script demonstrates finding the first `N` voxels which lie along a ray (ret

### Tracing contiguous segments along rays
```
-python scripts/debug_ray_segment_marching.py
+python examples/ray_segment_marching.py
```
This script demonstrates finding the first `N` continuous segments of voxels which lie along a ray (returning their index as well as their entry and exit points).
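A contiguous segment is simply a run of voxel hits whose exit parameter matches the next hit's entry parameter. The merging step can be sketched as follows, assuming per-ray entry/exit parameters already sorted along the ray (illustrative, not the `fvdb` API):

```python
import torch

def merge_segments(t_in: torch.Tensor, t_out: torch.Tensor, eps: float = 1e-6):
    """Merge touching (t_in, t_out) voxel intervals into contiguous segments."""
    segments = []
    start, end = t_in[0].item(), t_out[0].item()
    for a, b in zip(t_in[1:].tolist(), t_out[1:].tolist()):
        if a - end > eps:          # gap between voxels: close the current segment
            segments.append((start, end))
            start = a
        end = b
    segments.append((start, end))
    return segments

t_in = torch.tensor([0.0, 0.5, 1.0, 3.0, 3.5])
t_out = torch.tensor([0.5, 1.0, 1.5, 3.5, 4.0])
print(merge_segments(t_in, t_out))  # [(0.0, 1.5), (3.0, 4.0)]
```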

@@ -141,7 +173,7 @@ This script demonstrates finding the first `N` continuous segments of voxels whi

### Backpropagating through sampling and splatting
```
-python scripts/debug_overfit_sdf.py
+python examples/overfit_sdf.py
```
This script fits SDF values at grid corners to the SDF of a mesh using gradient descent.
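The same optimization loop can be sketched on a dense grid with plain PyTorch: store SDF values on a grid, sample them trilinearly at query points, and descend the error against an analytic target. This is an illustrative stand-in (an analytic sphere SDF and `torch.nn.functional.grid_sample` in place of the example's mesh SDF and sparse fVDB grid):

```python
import torch
import torch.nn.functional as F

# Learnable SDF values on a dense 8^3 grid; fVDB stores these sparsely.
grid = torch.zeros(1, 1, 8, 8, 8, requires_grad=True)
opt = torch.optim.Adam([grid], lr=0.05)

pts = torch.rand(4096, 3) * 2 - 1            # query points in [-1, 1]^3
target = pts.norm(dim=-1) - 0.5              # SDF of a sphere of radius 0.5

for it in range(200):
    opt.zero_grad()
    # 5D grid_sample performs trilinear interpolation for mode='bilinear'.
    samp = F.grid_sample(grid, pts.view(1, -1, 1, 1, 3),
                         mode='bilinear', align_corners=True).view(-1)
    loss = F.mse_loss(samp, target)
    loss.backward()                          # gradients flow to the grid values
    opt.step()
print(f"final loss: {loss.item():.5f}")
```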

@@ -157,3 +189,30 @@ python scripts/debug_grad_trilerp.py
python scripts/debug_grad_splat.py
```

+## Code Structure
+The main source code for fVDB lives in the [src](src) directory. There are several important files here:
+* `src/python/Bindings.cpp` exposes functionality directly to Python. It is mainly a wrapper around the core classes such as `fvdb::GridBatch` and `fvdb::JaggedTensor`.
+* `src/GridBatch.h` contains the implementation of `fvdb::GridBatch`, which is the core data structure on which fVDB is built. A `GridBatch` acts as a map between `(i, j, k)` integer coordinates and offsets in linear memory. This mapping can be used to perform a host of operations. The methods in this class are mostly lightweight wrappers around a set of CPU and CUDA *kernels*. The function prototypes for these kernels are defined in `src/detail/ops/Ops.h`.
+* `src/detail/ops/Ops.h` contains the function prototypes for the main kernels used by fVDB. Host and device kernel implementations are provided in the `src/detail/ops/*.cu` source files.
+* `src/detail/autograd` contains C++ implementations of PyTorch autograd functions for differentiable operations. Including the top-level header in this directory pulls in all of these functions.
+* `src/detail/utils/nanovdb` contains a number of utilities which make it easier to use NanoVDB.
+
+
+## References
+
+Please consider citing *f*VDB when you use it in a project. You can use the following BibTeX entry:
+
+```bibtex
+@article{williams2024fvdb,
+  title={fVDB: A Deep-Learning Framework for Sparse, Large-Scale, and High-Performance Spatial Intelligence},
+  author={Williams, Francis and Huang, Jiahui and Swartz, Jonathan and Klar, Gergely and Thakkar, Vijay and Cong, Matthew and Ren, Xuanchi and Li, Ruilong and Fuji-Tsang, Clement and Fidler, Sanja and Sifakis, Eftychios and Museth, Ken},
+  journal={ACM Transactions on Graphics (TOG)},
+  volume={43},
+  number={4},
+  pages={133:1--133:15},
+  year={2024},
+  publisher={ACM New York, NY, USA}
+}
+```
+
+## Contact
\ No newline at end of file
diff --git a/fvdb/docs/tutorials/installation.md b/fvdb/docs/tutorials/installation.md
index e9039c90b7..f5f88c6d6a 100644
--- a/fvdb/docs/tutorials/installation.md
+++ b/fvdb/docs/tutorials/installation.md
@@ -7,11 +7,11 @@ If you want to install the `fvdb` package into other python environments, we pro

Note that only Linux is supported for now (Ubuntu >= 20.04 recommended).

-|                | Python     | `cu113` | `cu116` | `cu117` | `cu118` |
-| -------------- | ---------- | ------- | ------- | ------- | ------- |
-| PyTorch 2.0.0  | 3.8 - 3.11 |         |         | ✅      | ✅      |
-| PyTorch 1.13.0 | 3.7 - 3.11 |         | ✅      | ✅      |         |
-| PyTorch 1.12.0 | 3.7 - 3.10 | ✅      | ✅      |         |         |
+| PyTorch     | Python     | `cu121` |
+| ----------- | ---------- | ------- |
+| 2.0.0-2.0.3 | 3.8 - 3.11 | ✅      |
+| 2.1.0-2.1.3 | 3.8 - 3.12 | ✅      |
+| 2.3.0       | 3.8 - 3.12 | ✅      |

Use the following command to install `fvdb`.

@@ -19,4 +19,4 @@ Use the following command to install `fvdb`.
pip install -U fvdb -f https://fvdb.huangjh.tech/whl/torch-${TORCH_VERSION}+${CUDA_VERSION}.html
```

-An example combination would be `TORCH_VERSION=2.0.0` and `CUDA_VERSION=cu118`.
+An example combination would be `TORCH_VERSION=2.0.0` and `CUDA_VERSION=cu121`.
diff --git a/fvdb/env/build_environment.yml b/fvdb/env/build_environment.yml index 61849b0be6..2161e58127 100644 --- a/fvdb/env/build_environment.yml +++ b/fvdb/env/build_environment.yml @@ -21,7 +21,7 @@ dependencies: - cmake - make - ninja - - sphinx + - sphinx>=7.0.0 - sphinx_rtd_theme - myst-parser - linkify-it-py diff --git a/fvdb/env/build_requirements.txt b/fvdb/env/build_requirements.txt index 47a76606bd..965cb5d1d2 100644 --- a/fvdb/env/build_requirements.txt +++ b/fvdb/env/build_requirements.txt @@ -3,4 +3,6 @@ pip >= 23.3.1 setuptools >= 68.2.2 wheel GitPython -ninja \ No newline at end of file +ninja +requests +tqdm \ No newline at end of file diff --git a/fvdb/env/learn_environment.yml b/fvdb/env/learn_environment.yml new file mode 100644 index 0000000000..35d195d9b5 --- /dev/null +++ b/fvdb/env/learn_environment.yml @@ -0,0 +1,44 @@ +name: fvdb_learn +channels: + - pyg + - nvidia + - pytorch + - conda-forge +dependencies: + - python=3.10 + - pytorch=2.1.2 + - pytorch-cuda=12.1 + - tensorboard + - pybind11 + - pip + - git + - gitpython + - ca-certificates + - certifi + - openssl + - cuda>=12.1.0, <12.2.0 + - cuda-nvcc>=12.1.0, <12.2.0 + - parameterized + - gcc_linux-64=11 + - gxx_linux-64=11 + - setuptools + - cmake + - make + - ninja + - ipython + - matplotlib + - tqdm + - pyg + - sparsehash + - pytorch-scatter + - sphinx>=7.0.0 + - sphinx_rtd_theme + - myst-parser + - pandas + - pytest-benchmark + - polyscope + - pip: + - point-cloud-utils + - linkify-it-py + - python-pycg + - https://nksr.s3.ap-northeast-1.amazonaws.com/dev-whls/torchsparse-2.0.0b0-cp310-cp310-linux_x86_64.whl diff --git a/fvdb/env/release_base_environment.yml b/fvdb/env/release_base_environment.yml new file mode 100644 index 0000000000..c0ac57c447 --- /dev/null +++ b/fvdb/env/release_base_environment.yml @@ -0,0 +1,32 @@ +# This release_base_environment is used by the 'Building Release Wheels' (.github/workflows/building.yml) GitHub Action +# the cuda, python, and pytorch versions are configured and appended by the Action at runtime. +name: fvdb_build_38 +channels: + - nvidia + - pytorch + - conda-forge +dependencies: + - pybind11 + - git + - gitpython + - ca-certificates + - certifi + - openssl + - gcc_linux-64=11 + - gxx_linux-64=11 + - setuptools + - cmake + - make + - ninja + - sphinx>=7.0.0 + - sphinx_rtd_theme + - myst-parser + - linkify-it-py + - numpy + - tqdm + - requests + - python=3.8 + - pytorch=2.1.2 + - pytorch-cuda=12.1 + - cuda>=12.1.0, <12.2.0 + - cuda-nvcc>=12.1.0, <12.2.0 \ No newline at end of file diff --git a/fvdb/env/test_environment.yml b/fvdb/env/test_environment.yml index 87ec091c4e..a580f7a20e 100644 --- a/fvdb/env/test_environment.yml +++ b/fvdb/env/test_environment.yml @@ -31,14 +31,12 @@ dependencies: - pyg - sparsehash - pytorch-scatter - - sphinx + - sphinx>=7.0.0 - sphinx_rtd_theme - myst-parser - pandas - pytest-benchmark - pip: - - point-cloud-utils - - polyscope - linkify-it-py - python-pycg - https://nksr.s3.ap-northeast-1.amazonaws.com/dev-whls/torchsparse-2.0.0b0-cp310-cp310-linux_x86_64.whl diff --git a/fvdb/examples/docker_build.sh b/fvdb/examples/docker_build.sh index 5820548f72..42b362a06c 100755 --- a/fvdb/examples/docker_build.sh +++ b/fvdb/examples/docker_build.sh @@ -11,4 +11,4 @@ TAG="${2:-latest}" set -x -docker build --build-arg GIT_ACCESS_TOKEN=$GIT_ACCESS_TOKEN -t feature-vdb:$TAG . +docker build --build-arg GIT_ACCESS_TOKEN=$GIT_ACCESS_TOKEN -t fvdb:$TAG . 
diff --git a/fvdb/fvdb/__init__.py b/fvdb/fvdb/__init__.py index 716587c3c2..5831ebc165 100644 --- a/fvdb/fvdb/__init__.py +++ b/fvdb/fvdb/__init__.py @@ -17,8 +17,8 @@ from . import nn -__version__ = '0.0.0' -__version_info__ = (0, 0, 0) +__version__ = '0.0.1' +__version_info__ = (0, 0, 1) __all__ = [ "GridBatch", diff --git a/fvdb/setup.py b/fvdb/setup.py index 7ff104123b..8b7a231a19 100644 --- a/fvdb/setup.py +++ b/fvdb/setup.py @@ -4,6 +4,7 @@ import shutil import requests from tqdm import tqdm +from pathlib import Path import git import git.repo @@ -92,15 +93,29 @@ def build_extension(self, _ext): self.copy_file(regular_file, inplace_file, level=self.verbose) # type: ignore def run(self) -> None: + # Use PAT clone for github actions (no fingerprinting) + if os.getenv('GITHUB_ACTIONS') == 'true' or os.getenv('GITLAB_CI') == 'true': + token = os.getenv('GITHUB_ACCESS_TOKEN') + nanovdb_url = f"https://{token}@github.com/NVIDIA-Omniverse/NanoVDB.git" + else: + nanovdb_url = "git@github.com:NVIDIA-Omniverse/NanoVDB.git" + + self.download_external_dep( + name='nanovdb', + git_url=nanovdb_url, + git_tag='bfdd01dfd4e555fcbb3d6b5a3d85e8290d1eaec9' + ) + _, cutlass_repo = self.download_external_dep( name='cutlass', git_url='https://github.com/NVIDIA/cutlass.git', git_tag='v3.4.0' ) try: - cutlass_repo.git.apply(os.path.join(os.path.dirname(__file__), 'env/cutlass.patch')) - except GitCommandError: - print("Failed to apply cutlass patch, continuing without patching") + # NOTE: In python <=3.8, __file__ will be a relative path and >3.8 it is an absolute path + cutlass_repo.git.apply(Path(__file__).resolve().parent / 'env' / 'cutlass.patch') + except GitCommandError as e: + print(f"Failed to apply cutlass patch: {str(e)}, continuing without patching") self.download_external_dep( name='cudnn_fe', @@ -124,8 +139,7 @@ def run(self) -> None: # Find all the headers and copy them into the build directory. # This way extension modules of FVDB can include them. 
fvdb_headers = get_header_files_recursive('src', 'fvdb') - cwd = os.path.dirname(os.path.abspath(__file__)) - nanovdb_headers = get_header_files_recursive(os.path.join(cwd, '..', 'nanovdb'), 'nanovdb') + nanovdb_headers = get_header_files_recursive('external/nanovdb/', 'nanovdb') for header_folder, header_files in fvdb_headers + nanovdb_headers: os.makedirs(os.path.join(self.build_lib, header_folder), exist_ok=True) @@ -242,7 +256,7 @@ def download_and_install_cudnn(): name='fvdb.fvdblib', sources=get_source_files_recursive('src', include_bindings=False), include_dirs=[os.path.join(cwd, 'src'), - os.path.join(cwd, '..', 'nanovdb'), + os.path.join(cwd, 'external/nanovdb'), os.path.join(cwd, 'external/cutlass/include'), os.path.join(cwd, 'external/c-blosc/install/include'), os.path.join(cwd, 'external/cudnn_fe/include')] + cudnn_include_dirs, @@ -255,7 +269,7 @@ def download_and_install_cudnn(): name='fvdb._Cpp', sources=get_source_files_recursive('src/python/'), include_dirs=[os.path.join(cwd, 'src'), - os.path.join(cwd, '..', 'nanovdb'), + os.path.join(cwd, 'external/nanovdb'), os.path.join(cwd, 'external/cutlass/include'), os.path.join(cwd, 'external/c-blosc/install/include')], library_dirs=[os.path.join(cwd, 'fvdb')], @@ -265,7 +279,15 @@ def download_and_install_cudnn(): 'nvcc': nvcc_flags}, language='c++') + def retrieve_version(file_path = "fvdb/__init__.py"): + with open(file_path, "r") as f: + for line in f: + if line.startswith("__version__"): + return line.split("=")[1].strip().strip("'").strip('"') + return "0.0.0" + setup(name='fvdb', + version = retrieve_version(), ext_modules=[lib_ext, bind_ext], packages=['fvdb', 'fvdb.nn', 'fvdb.utils'], include_package_data=True, diff --git a/fvdb/src/detail/TypesImpl.h b/fvdb/src/detail/TypesImpl.h index 11dfd0ef8c..a6182dae0c 100644 --- a/fvdb/src/detail/TypesImpl.h +++ b/fvdb/src/detail/TypesImpl.h @@ -222,7 +222,7 @@ class Vec3BatchImpl { TORCH_CHECK_VALUE(mValue[i][0] > 0 && mValue[i][1] > 0 && mValue[i][2] > 0, "Expected all coordinates of " + name + " to be positive"); } } - TORCH_CHECK_VALUE(batchSize == mValue.size(), "Expected " + name + "batch of 3D coordinates to have size [" + std::to_string(batchSize) + ", 3]"); + TORCH_CHECK_VALUE(batchSize == mValue.size(), "Expected " + name + " batch of 3D coordinates to have size [" + std::to_string(batchSize) + ", 3]"); return mValue; }
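One note for readers tracing the release workflow earlier in this patch: the `Set version` step in `building.yml` rewrites `__version__` (the value `retrieve_version` above reads back) into a local version tag such as `0.0.1+pt21cu121`. Here is a sketch of that `sed`/`perl` string logic as a hypothetical Python helper, assuming `x.y.z`-style version strings:

```python
# Hypothetical helper mirroring the "Set version" step in building.yml:
# torch "2.1.2" -> "pt21", CUDA "12.1.1" -> "cu121".
def wheel_version(base: str, torch_version: str, cuda_version: str) -> str:
    major, minor = torch_version.split(".")[:2]       # "2.1.2" -> "2", "1"
    cu_major, cu_minor = cuda_version.split(".")[:2]  # "12.1.1" -> "12", "1"
    return f"{base}+pt{major}{minor}cu{cu_major}{cu_minor}"

assert wheel_version("0.0.1", "2.1.2", "12.1.1") == "0.0.1+pt21cu121"
```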