Skip to content

Commit

Permalink
Merge branch 'main' into fer2013.py_issue
Browse files Browse the repository at this point in the history
  • Loading branch information
real-ojaswi committed May 14, 2024
2 parents 3ff1d52 + 947ae1d commit dabe6e3
Show file tree
Hide file tree
Showing 47 changed files with 2,896 additions and 347 deletions.
41 changes: 10 additions & 31 deletions .github/scripts/cmake.sh
Original file line number Diff line number Diff line change
Expand Up @@ -46,16 +46,10 @@ fi
echo '::group::Prepare CMake builds'
mkdir -p cpp_build

pushd test/tracing/frcnn
python trace_model.py
pushd examples/cpp
python script_model.py
mkdir -p build
mv fasterrcnn_resnet50_fpn.pt build
popd

pushd examples/cpp/hello_world
python trace_model.py
mkdir -p build
mv resnet18.pt build
mv resnet18.pt fasterrcnn_resnet50_fpn.pt build
popd

# This was only needed for the tracing above
Expand All @@ -65,6 +59,7 @@ echo '::endgroup::'
echo '::group::Build and install libtorchvision'
pushd cpp_build


# On macOS, CMake is looking for the library (*.dylib) and the header (*.h) separately. By default, it prefers to load
# the header from other packages that install the library. This easily leads to a mismatch if the library installed
# from conda doesn't have the exact same version. Thus, we need to explicitly set CMAKE_FIND_FRAMEWORK=NEVER to force
Expand All @@ -85,40 +80,24 @@ fi
popd
echo '::endgroup::'

echo '::group::Build and run project that uses Faster-RCNN'
pushd test/tracing/frcnn/build

cmake .. -DTorch_DIR="${Torch_DIR}" -DWITH_CUDA="${WITH_CUDA}" \
-DCMAKE_PREFIX_PATH="${CONDA_PREFIX}" \
-DCMAKE_FIND_FRAMEWORK=NEVER
if [[ $OS_TYPE == windows ]]; then
"${PACKAGING_DIR}/windows/internal/vc_env_helper.bat" "${PACKAGING_DIR}/windows/internal/build_frcnn.bat" $JOBS
cd Release
cp ../fasterrcnn_resnet50_fpn.pt .
else
make -j$JOBS
fi

./test_frcnn_tracing

popd
echo '::endgroup::'

echo '::group::Build and run C++ example'
pushd examples/cpp/hello_world/build
pushd examples/cpp/build

cmake .. -DTorch_DIR="${Torch_DIR}" \
-DCMAKE_PREFIX_PATH="${CONDA_PREFIX}" \
-DCMAKE_FIND_FRAMEWORK=NEVER
-DCMAKE_FIND_FRAMEWORK=NEVER \
-DUSE_TORCHVISION=ON # Needed for faster-rcnn since it's using torchvision ops like NMS.
if [[ $OS_TYPE == windows ]]; then
"${PACKAGING_DIR}/windows/internal/vc_env_helper.bat" "${PACKAGING_DIR}/windows/internal/build_cpp_example.bat" $JOBS
cd Release
cp ../resnet18.pt .
cp ../fasterrcnn_resnet50_fpn.pt .
else
make -j$JOBS
fi

./hello-world
./run_model resnet18.pt
./run_model fasterrcnn_resnet50_fpn.pt

popd
echo '::endgroup::'
2 changes: 1 addition & 1 deletion .github/scripts/unittest.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ eval "$($(which conda) shell.bash hook)" && conda deactivate && conda activate c

echo '::group::Install testing utilities'
# TODO: remove the <8 constraint on pytest when https://github.com/pytorch/vision/issues/8238 is closed
pip install --progress-bar=off "pytest<8" pytest-mock pytest-cov expecttest!=0.2.0
pip install --progress-bar=off "pytest<8" pytest-mock pytest-cov expecttest!=0.2.0 requests
echo '::endgroup::'

python test/smoke_test.py
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ jobs:
echo '::group::Lint C source'
set +e
./.github/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable ./clang-format
./.github/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable ./clang-format --exclude "torchvision/csrc/io/image/cpu/giflib/*"
if [ $? -ne 0 ]; then
git --no-pager diff
Expand Down
12 changes: 1 addition & 11 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ option(WITH_CUDA "Enable CUDA support" OFF)
option(WITH_MPS "Enable MPS support" OFF)
option(WITH_PNG "Enable features requiring LibPNG." ON)
option(WITH_JPEG "Enable features requiring LibJPEG." ON)
option(USE_PYTHON "Link to Python when building" OFF)

if(WITH_CUDA)
enable_language(CUDA)
Expand All @@ -33,11 +32,6 @@ if (WITH_JPEG)
find_package(JPEG REQUIRED)
endif()

if (USE_PYTHON)
add_definitions(-DUSE_PYTHON)
find_package(Python3 REQUIRED COMPONENTS Development)
endif()

function(CUDA_CONVERT_FLAGS EXISTING_TARGET)
get_property(old_flags TARGET ${EXISTING_TARGET} PROPERTY INTERFACE_COMPILE_OPTIONS)
if(NOT "${old_flags}" STREQUAL "")
Expand Down Expand Up @@ -80,7 +74,7 @@ include(GNUInstallDirs)
include(CMakePackageConfigHelpers)

set(TVCPP torchvision/csrc)
list(APPEND ALLOW_LISTED ${TVCPP} ${TVCPP}/io/image ${TVCPP}/io/image/cpu ${TVCPP}/models ${TVCPP}/ops
list(APPEND ALLOW_LISTED ${TVCPP} ${TVCPP}/io/image ${TVCPP}/io/image/cpu ${TVCPP}/io/image/cpu/giflib ${TVCPP}/models ${TVCPP}/ops
${TVCPP}/ops/autograd ${TVCPP}/ops/cpu ${TVCPP}/io/image/cuda)
if(WITH_CUDA)
list(APPEND ALLOW_LISTED ${TVCPP}/ops/cuda ${TVCPP}/ops/autocast)
Expand Down Expand Up @@ -110,10 +104,6 @@ if (WITH_JPEG)
target_link_libraries(${PROJECT_NAME} PRIVATE ${JPEG_LIBRARIES})
endif()

if (USE_PYTHON)
target_link_libraries(${PROJECT_NAME} PRIVATE Python3::Python)
endif()

set_target_properties(${PROJECT_NAME} PROPERTIES
EXPORT_NAME TorchVision
INSTALL_RPATH ${TORCH_INSTALL_PREFIX}/lib)
Expand Down
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ We don't officially support building from source using `pip`, but _if_ you do, y
#### Other development dependencies (some of these are needed to run tests):

```
pip install expecttest flake8 typing mypy pytest pytest-mock scipy
pip install expecttest flake8 typing mypy pytest pytest-mock scipy requests
```

## Development Process
Expand Down
42 changes: 8 additions & 34 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -74,40 +74,14 @@ python setup.py install

# Using the models on C++

TorchVision provides an example project for how to use the models on C++ using JIT Script.

Installation From source:

```
mkdir build
cd build
# Add -DWITH_CUDA=on support for the CUDA if needed
cmake ..
make
make install
```

Once installed, the library can be accessed in cmake (after properly configuring `CMAKE_PREFIX_PATH`) via the
`TorchVision::TorchVision` target:

```
find_package(TorchVision REQUIRED)
target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
```

The `TorchVision` package will also automatically look for the `Torch` package and add it as a dependency to
`my-target`, so make sure that it is also available to cmake via the `CMAKE_PREFIX_PATH`.

For an example setup, take a look at `examples/cpp/hello_world`.

Python linking is disabled by default when compiling TorchVision with CMake, this allows you to run models without any
Python dependency. In some special cases where TorchVision's operators are used from Python code, you may need to link
to Python. This can be done by passing `-DUSE_PYTHON=on` to CMake.

### TorchVision Operators

In order to get the torchvision operators registered with torch (eg. for the JIT), all you need to do is to ensure that
you `#include <torchvision/vision.h>` in your project.
Refer to [example/cpp](https://github.com/pytorch/vision/tree/main/examples/cpp).

**DISCLAIMER**: the `libtorchvision` library includes the torchvision
custom ops as well as most of the C++ torchvision APIs. Those APIs do not come
with any backward-compatibility guarantees and may change from one version to
the next. Only the Python APIs are stable and with backward-compatibility
guarantees. So, if you need stability within a C++ environment, your best bet is
to export the Python APIs via torchscript.

## Documentation

Expand Down
8 changes: 0 additions & 8 deletions cmake/TorchVisionConfig.cmake.in
Original file line number Diff line number Diff line change
Expand Up @@ -46,13 +46,5 @@ if(@WITH_JPEG@)
target_compile_definitions(${PN}::${PN} INTERFACE JPEG_FOUND)
endif()

if (@USE_PYTHON@)
if(NOT TARGET Python3::Python)
find_package(Python3 COMPONENTS Development)
endif()
target_link_libraries(torch INTERFACE Python3::Python)
target_compile_definitions(${PN}::${PN} INTERFACE USE_PYTHON)
endif()

endif()
endif()
1 change: 1 addition & 0 deletions docs/source/io.rst
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ Images
encode_jpeg
decode_jpeg
write_jpeg
decode_gif
encode_png
decode_png
write_png
Expand Down
18 changes: 18 additions & 0 deletions examples/cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# Minimal example project: build the `run_model` executable, which loads a
# TorchScript model file and runs it (see run_model.cpp / README.rst).
cmake_minimum_required(VERSION 3.10)
project(run_model)

# When ON, also link against libtorchvision so that models using torchvision
# C++ operators (e.g. NMS in detection models like Faster R-CNN) can be loaded.
option(USE_TORCHVISION "Whether to link to torchvision" OFF)

find_package(Torch REQUIRED)
if(USE_TORCHVISION)
  find_package(TorchVision REQUIRED)
endif()

add_executable(run_model run_model.cpp)

# PRIVATE: the libraries are an implementation detail of the executable.
# Always pass a visibility keyword to target_link_libraries; the keyword-less
# form has legacy semantics and must not be mixed with the keyword form.
target_link_libraries(run_model PRIVATE "${TORCH_LIBRARIES}")
if(USE_TORCHVISION)
  target_link_libraries(run_model PRIVATE TorchVision::TorchVision)
endif()

# LibTorch headers require C++17. CXX_STANDARD_REQUIRED makes that a hard
# error instead of silently decaying to an older standard on old compilers.
set_target_properties(run_model PROPERTIES
  CXX_STANDARD 17
  CXX_STANDARD_REQUIRED ON
)
101 changes: 101 additions & 0 deletions examples/cpp/README.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
Using torchvision models in C++
===============================

This is a minimal example of getting TorchVision models to work in C++ with
TorchScript. The model is first scripted in Python and exported to a file, and
then loaded in C++. For a similar walkthrough, see `this PyTorch tutorial
<https://pytorch.org/tutorials/advanced/cpp_export.html>`_.

In order to successfully compile this example, make sure you have ``LibTorch``
installed. You can either:

- Install PyTorch normally
- Or download the LibTorch C++ distribution.

In both cases, refer to the `PyTorch "Get Started" page
<https://pytorch.org/get-started/locally/>`_ for the corresponding install or
download instructions.

Some torchvision models only depend on PyTorch operators, and can be used in C++
without depending on the torchvision lib. Other models rely on torchvision's C++
operators like NMS, RoiAlign (typically the detection models) and those need to
be linked against the torchvision lib.

We'll first see the simpler case of running a model without the torchvision lib
dependency.

Running a model that doesn't need torchvision lib
-------------------------------------------------

Create a ``build`` directory inside the current one.

```bash
mkdir build
cd build
```

Then run `python ../trace_model.py` which should create a `resnet18.pt` file in
the build directory. This is the scripted model that will be used in the C++
code.

We can now start building with CMake. We have to tell CMake where it can find
the necessary PyTorch resources. If you installed PyTorch normally, you can do:

```bash
TORCH_PATH=$(python -c "import pathlib, torch; print(pathlib.Path(torch.__path__[0]))")
Torch_DIR="${TORCH_PATH}/share/cmake/Torch" # there should be .cmake files in there
cmake .. -DTorch_DIR=$Torch_DIR
```

If instead you downloaded the LibTorch somewhere, you can do:

```bash
cmake .. -DCMAKE_PREFIX_PATH=/path/to/libtorch
```

Then `cmake --build .` and you should now be able to run

```bash
./run_model resnet18.pt
```

If you try to run ``run_model`` with a model that depends on the torchvision
lib, like ``./run_model fasterrcnn_resnet50_fpn.pt``, you should get a runtime
error. This is because the executable wasn't linked against the torchvision lib.


Running a model that needs torchvision lib
------------------------------------------

First, we need to build the torchvision lib. To build the torchvision lib go to
the root of the torchvision project and run:

```bash
mkdir build
cd build
cmake .. -DCMAKE_PREFIX_PATH=/path/to/libtorch # or -DTorch_DIR= if you installed PyTorch normally, see above
cmake --build .
cmake --install .
```

You may want to pass `-DCMAKE_INSTALL_PREFIX=/path/to/libtorchvision` for
cmake to copy/install the files to a specific location (e.g. `$CONDA_PREFIX`).

**DISCLAIMER**: the `libtorchvision` library includes the torchvision
custom ops as well as most of the C++ torchvision APIs. Those APIs do not come
with any backward-compatibility guarantees and may change from one version to
the next. Only the Python APIs are stable and with backward-compatibility
guarantees. So, if you need stability within a C++ environment, your best bet is
to export the Python APIs via torchscript.

Now that libtorchvision is built and installed we can tell our project to use
and link to it via the `-DUSE_TORCHVISION` flag. We also need to tell CMake
where to find it, just like we did with LibTorch, e.g.:

```bash
cmake .. -DTorch_DIR=$Torch_DIR -DTorchVision_DIR=path/to/libtorchvision -DUSE_TORCHVISION=ON
cmake --build .
```

Now the `run_model` executable should be able to run the
`fasterrcnn_resnet50_fpn.pt` file.
20 changes: 0 additions & 20 deletions examples/cpp/hello_world/CMakeLists.txt

This file was deleted.

20 changes: 0 additions & 20 deletions examples/cpp/hello_world/README.rst

This file was deleted.

Loading

0 comments on commit dabe6e3

Please sign in to comment.