diff --git a/.bazelrc b/.bazelrc
index ddaae0155b..b2af832182 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -21,9 +21,9 @@
# | Build Configurations |
# +------------------------------------------------------------+
# Enable colorful output of GCC
-build --cxxopt="-fdiagnostics-color=always"
-build --cxxopt='-std=c++17'
-#build --linkopt="-Wl,--no-as-needed"
+build:default --cxxopt="-fdiagnostics-color=always"
+build:default --cxxopt='-std=c++17'
+#build:default --linkopt="-Wl,--no-as-needed"
build:windows --cxxopt="/GS-" --cxxopt="/std:c++17" --cxxopt="/permissive-"
build:windows --cxxopt="/wd4244" --cxxopt="/wd4267" --cxxopt="/wd4819"
diff --git a/.bazelversion b/.bazelversion
index 024b066c0b..91e4a9f262 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-6.2.1
+6.3.2
diff --git a/.github/workflows/docgen.yml b/.github/workflows/docgen.yml
index 035bb24f6f..1e48d7d262 100644
--- a/.github/workflows/docgen.yml
+++ b/.github/workflows/docgen.yml
@@ -14,11 +14,11 @@ jobs:
if: ${{ ! contains(github.actor, 'pytorchbot') }}
environment: pytorchbot-env
container:
- image: docker.io/pytorch/manylinux-builder:cuda12.1
+ image: docker.io/pytorch/manylinux-builder:cuda12.4
options: --gpus all
env:
- CUDA_HOME: /usr/local/cuda-12.1
- VERSION_SUFFIX: cu121
+ CUDA_HOME: /usr/local/cuda-12.4
+ VERSION_SUFFIX: cu124
CI_BUILD: 1
steps:
- uses: actions/checkout@v3
@@ -33,14 +33,14 @@ jobs:
- name: Install base deps
run: |
python3 -m pip install pip --upgrade
- python3 -m pip install pyyaml numpy torch --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu121
+ python3 -m pip install pyyaml numpy torch --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu124
./packaging/pre_build_script.sh
- name: Get HEAD SHA
id: vars
run: echo "sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- name: Build Python Package
run: |
- python3 -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu121
+ python3 -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu124
- name: Generate New Docs
run: |
cd docsrc
@@ -61,4 +61,4 @@ jobs:
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
- cancel-in-progress: true
\ No newline at end of file
+ cancel-in-progress: true
diff --git a/README.md b/README.md
index 3539404b12..9b436a2b0b 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ Torch-TensorRT
---
-Torch-TensorRT brings the power of TensorRT to PyTorch. Accelerate inference latency by up to 5x compared to eager execution in just one line of code.
+Torch-TensorRT brings the power of TensorRT to PyTorch. Accelerate inference latency by up to 5x compared to eager execution in just one line of code.
## Installation
@@ -52,7 +52,7 @@ optimized_model(x) # this will be fast!
```
### Option 2: Export
-If you want to optimize your model ahead-of-time and/or deploy in a C++ environment, Torch-TensorRT provides an export-style workflow that serializes an optimized module. This module can be deployed in PyTorch or with libtorch (i.e. without a Python dependency).
+If you want to optimize your model ahead-of-time and/or deploy in a C++ environment, Torch-TensorRT provides an export-style workflow that serializes an optimized module. This module can be deployed in PyTorch or with libtorch (i.e. without a Python dependency).
#### Step 1: Optimize + serialize
```python
@@ -62,7 +62,7 @@ import torch_tensorrt
model = MyModel().eval().cuda() # define your model here
inputs = [torch.randn((1, 3, 224, 224)).cuda()] # define a list of representative inputs here
-trt_gm = torch_tensorrt.compile(model, ir="dynamo", inputs)
+trt_gm = torch_tensorrt.compile(model, ir="dynamo", inputs=inputs)
torch_tensorrt.save(trt_gm, "trt.ep", inputs=inputs) # PyTorch only supports Python runtime for an ExportedProgram. For C++ deployment, use a TorchScript file
torch_tensorrt.save(trt_gm, "trt.ts", output_format="torchscript", inputs=inputs)
```
@@ -116,9 +116,9 @@ auto results = trt_mod.forward({input_tensor});
These are the dependencies used to verify the test cases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.
-- Bazel 5.2.0
-- Libtorch 2.4.0.dev (latest nightly) (built with CUDA 12.4)
-- CUDA 12.1
+- Bazel 6.3.2
+- Libtorch 2.5.0.dev (latest nightly) (built with CUDA 12.4)
+- CUDA 12.4
- TensorRT 10.0.1.6
## Deprecation Policy
diff --git a/WORKSPACE b/WORKSPACE
index 6635454ac2..734ce8c85f 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -43,7 +43,13 @@ local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-12.1/",
+ path = "/usr/local/cuda-12.4/",
+)
+
+new_local_repository(
+ name = "cuda_win",
+ build_file = "@//third_party/cuda:BUILD",
+ path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.4/",
)
#############################################################################################################
@@ -54,14 +60,21 @@ http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu124/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-latest.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu124/libtorch-shared-with-deps-latest.zip"],
+)
+
+http_archive(
+ name = "libtorch_win",
+ build_file = "@//third_party/libtorch:BUILD",
+ strip_prefix = "libtorch",
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu124/libtorch-win-shared-with-deps-latest.zip"],
)
# Download these tarballs manually from the NVIDIA website
@@ -78,6 +91,18 @@ http_archive(
],
)
+http_archive(
+ name = "tensorrt_win",
+ build_file = "@//third_party/tensorrt/archive:BUILD",
+ sha256 = "d667bd10b178e239b621a8929008ef3e27967d181bf07a39845a0f99edeec47a",
+ strip_prefix = "TensorRT-10.0.1.6",
+ urls = [
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.0.1/zip/TensorRT-10.0.1.6.Windows10.win10.cuda-12.4.zip",
+ ],
+)
+
+
+
####################################################################################
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
####################################################################################
diff --git a/core/BUILD b/core/BUILD
index 23ef0bfec0..4fd1a7cf64 100644
--- a/core/BUILD
+++ b/core/BUILD
@@ -17,6 +17,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "core",
srcs = [
@@ -31,10 +38,10 @@ cc_library(
"//core/partitioning",
"//core/runtime",
"//core/util/logging",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/BUILD b/core/conversion/BUILD
index aa40c58ed7..d00defcbd8 100644
--- a/core/conversion/BUILD
+++ b/core/conversion/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "conversion",
srcs = [
@@ -26,10 +33,10 @@ cc_library(
"//core/conversion/var",
"//core/ir",
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/conversionctx/BUILD b/core/conversion/conversionctx/BUILD
index b52128a7f0..0e910d7127 100644
--- a/core/conversion/conversionctx/BUILD
+++ b/core/conversion/conversionctx/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "conversionctx",
srcs = [
@@ -21,10 +28,10 @@ cc_library(
deps = [
"//core/ir",
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/converters/BUILD b/core/conversion/converters/BUILD
index e91b9b4b03..062f700cab 100644
--- a/core/conversion/converters/BUILD
+++ b/core/conversion/converters/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "weights",
srcs = [
@@ -21,10 +28,10 @@ cc_library(
deps = [
"//core/conversion/conversionctx",
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
@@ -41,10 +48,10 @@ cc_library(
":weights",
"//core/conversion/conversionctx",
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
@@ -98,10 +105,10 @@ cc_library(
"//core/conversion/var",
"//core/plugins:torch_tensorrt_plugins",
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/evaluators/BUILD b/core/conversion/evaluators/BUILD
index 03789336be..5116f3f76f 100644
--- a/core/conversion/evaluators/BUILD
+++ b/core/conversion/evaluators/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "evaluators",
srcs = [
@@ -28,6 +35,7 @@ cc_library(
"//core/conversion/var",
"//core/util:prelude",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/core/conversion/tensorcontainer/BUILD b/core/conversion/tensorcontainer/BUILD
index 187a84b787..c07e8f5516 100644
--- a/core/conversion/tensorcontainer/BUILD
+++ b/core/conversion/tensorcontainer/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "tensorcontainer",
srcs = [
@@ -20,10 +27,10 @@ cc_library(
],
deps = [
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/var/BUILD b/core/conversion/var/BUILD
index b0df6b02f8..7042a1f402 100644
--- a/core/conversion/var/BUILD
+++ b/core/conversion/var/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "var",
srcs = [
@@ -23,10 +30,10 @@ cc_library(
"//core/conversion/converters:converter_util",
"//core/conversion/tensorcontainer",
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/ir/BUILD b/core/ir/BUILD
index 11b0c09ba4..1e4640f08f 100644
--- a/core/ir/BUILD
+++ b/core/ir/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "ir",
srcs = [
@@ -23,10 +30,10 @@ cc_library(
],
deps = [
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/lowering/BUILD b/core/lowering/BUILD
index 81ad86e906..2e9d236b05 100644
--- a/core/lowering/BUILD
+++ b/core/lowering/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "lowering",
srcs = [
@@ -26,6 +33,7 @@ cc_library(
"//core/lowering/passes",
"//core/util:prelude",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/core/lowering/passes/BUILD b/core/lowering/passes/BUILD
index ae1b06bfcf..5a99139db3 100644
--- a/core/lowering/passes/BUILD
+++ b/core/lowering/passes/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "passes",
srcs = [
@@ -49,6 +56,7 @@ cc_library(
deps = [
"//core/util:prelude",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/core/partitioning/BUILD b/core/partitioning/BUILD
index 2acfec2cc4..784f20c719 100644
--- a/core/partitioning/BUILD
+++ b/core/partitioning/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "partitioning",
srcs = [
@@ -29,6 +36,7 @@ cc_library(
"//core/partitioning/segmentedblock",
"//core/util:prelude",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/core/partitioning/partitioningctx/BUILD b/core/partitioning/partitioningctx/BUILD
index dec1a1f992..c595ca001d 100644
--- a/core/partitioning/partitioningctx/BUILD
+++ b/core/partitioning/partitioningctx/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "partitioningctx",
srcs = [
@@ -25,6 +32,7 @@ cc_library(
"//core/partitioning/segmentedblock",
"//core/util:prelude",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/core/partitioning/partitioninginfo/BUILD b/core/partitioning/partitioninginfo/BUILD
index a4c1031295..96194bf629 100644
--- a/core/partitioning/partitioninginfo/BUILD
+++ b/core/partitioning/partitioninginfo/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "partitioninginfo",
srcs = [
@@ -24,6 +31,7 @@ cc_library(
"//core/lowering",
"//core/util:prelude",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/core/partitioning/segmentedblock/BUILD b/core/partitioning/segmentedblock/BUILD
index 0ab6246c81..44cd1da98d 100644
--- a/core/partitioning/segmentedblock/BUILD
+++ b/core/partitioning/segmentedblock/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "segmentedblock",
srcs = [
@@ -24,6 +31,7 @@ cc_library(
"//core/lowering",
"//core/util:prelude",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/core/plugins/BUILD b/core/plugins/BUILD
index 6fcb0e8934..2b7a28848f 100644
--- a/core/plugins/BUILD
+++ b/core/plugins/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "torch_tensorrt_plugins",
srcs = [
@@ -30,11 +37,10 @@ cc_library(
],
deps = [
"//core/util:prelude",
- "@tensorrt//:nvinfer",
- "@tensorrt//:nvinferplugin",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@tensorrt_win//:nvinferplugin", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@tensorrt//:nvinferplugin", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@tensorrt//:nvinferplugin", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/runtime/BUILD b/core/runtime/BUILD
index ef70610a39..9c8f56f5af 100644
--- a/core/runtime/BUILD
+++ b/core/runtime/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "runtime",
srcs = [
@@ -33,10 +40,10 @@ cc_library(
deps = [
"//core/plugins:torch_tensorrt_plugins",
"//core/util:prelude",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/util/BUILD b/core/util/BUILD
index be44122d8d..56e5182160 100644
--- a/core/util/BUILD
+++ b/core/util/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "prelude",
hdrs = [
@@ -33,6 +40,7 @@ cc_library(
deps = [
":macros",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -65,11 +73,10 @@ cc_library(
hdrs = [
"build_info.h",
],
- deps = [
- "@tensorrt//:nvinfer",
- ] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ deps = select({
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
)
@@ -84,10 +91,10 @@ cc_library(
deps = [
":macros",
"//core/util/logging",
- "@tensorrt//:nvinfer",
] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/util/logging/BUILD b/core/util/logging/BUILD
index 802d1cc18d..4796d5def1 100644
--- a/core/util/logging/BUILD
+++ b/core/util/logging/BUILD
@@ -10,6 +10,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_library(
name = "logging",
srcs = [
@@ -18,11 +25,10 @@ cc_library(
hdrs = [
"TorchTRTLogger.h",
],
- deps = [
- "@tensorrt//:nvinfer",
- ] + select({
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch"],
+ deps = select({
+ ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
+ ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
+ "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
}),
alwayslink = True,
)
diff --git a/cpp/bin/torchtrtc/BUILD b/cpp/bin/torchtrtc/BUILD
index ba0e4eb2c0..00e2490daa 100644
--- a/cpp/bin/torchtrtc/BUILD
+++ b/cpp/bin/torchtrtc/BUILD
@@ -9,6 +9,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_binary(
name = "torchtrtc",
srcs = [
@@ -28,6 +35,10 @@ cc_binary(
"//cpp:torch_tensorrt",
"//third_party/args",
] + select({
+ ":windows": [
+ "@libtorch_win//:caffe2",
+ "@libtorch_win//:libtorch",
+ ],
":use_pre_cxx11_abi": [
"@libtorch_pre_cxx11_abi//:caffe2",
"@libtorch_pre_cxx11_abi//:libtorch",
diff --git a/dev_dep_versions.yml b/dev_dep_versions.yml
index ec7f75f599..5c560c41f9 100644
--- a/dev_dep_versions.yml
+++ b/dev_dep_versions.yml
@@ -1,3 +1,3 @@
-__version__: "2.4.0.dev0"
-__cuda_version__: "12.1"
-__tensorrt_version__: "10.0.1"
\ No newline at end of file
+__version__: "2.5.0.dev0"
+__cuda_version__: "12.4"
+__tensorrt_version__: "10.0.1"
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1cb69cd799..10014cd325 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,9 +1,9 @@
# syntax=docker/dockerfile:1
# Base image starts with CUDA
-ARG BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04
+ARG BASE_IMG=nvidia/cuda:12.4.1-devel-ubuntu22.04
FROM ${BASE_IMG} as base
-ENV BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04
+ENV BASE_IMG=nvidia/cuda:12.4.1-devel-ubuntu22.04
ARG TENSORRT_VERSION
ENV TENSORRT_VERSION=${TENSORRT_VERSION}
@@ -36,7 +36,7 @@ RUN pyenv global ${PYTHON_VERSION}
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
RUN apt-get update
-RUN TENSORRT_MAJOR_VERSION=`echo ${TENSORRT_VERSION} | cut -d '.' -f 1` && apt-get install -y libnvinfer${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvinfer-plugin${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.*
+RUN TENSORRT_MAJOR_VERSION=`echo ${TENSORRT_VERSION} | cut -d '.' -f 1` && apt-get install -y libnvinfer${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvinfer-plugin${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers${TENSORRT_MAJOR_VERSION}=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.*
# Setup Bazel via Bazelisk
RUN wget -q https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 -O /usr/bin/bazel &&\
diff --git a/docker/dist-build.sh b/docker/dist-build.sh
index 0bc2fc09f9..1f1faec6ed 100755
--- a/docker/dist-build.sh
+++ b/docker/dist-build.sh
@@ -3,9 +3,9 @@
TOP_DIR=$(cd $(dirname $0); pwd)/..
if [[ -z "${USE_CXX11}" ]]; then
- BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -w dist"
+ BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu124 -w dist"
else
- BUILD_CMD="python -m pip wheel . --config-setting="--build-option=--use-cxx11-abi" --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -w dist"
+ BUILD_CMD="python -m pip wheel . --config-setting="--build-option=--use-cxx11-abi" --extra-index-url https://download.pytorch.org/whl/nightly/cu124 -w dist"
fi
# TensorRT restricts our pip version
diff --git a/docsrc/getting_started/getting_started_with_windows.rst b/docsrc/getting_started/getting_started_with_windows.rst
index c2091e354f..a90221b532 100644
--- a/docsrc/getting_started/getting_started_with_windows.rst
+++ b/docsrc/getting_started/getting_started_with_windows.rst
@@ -7,133 +7,28 @@ Torch-TensorRT has community support for Windows platform using CMake
Prerequisite:
-* Microsoft Visual Studio
-* LibTorch
-* TensorRT
+* Microsoft VS 2022 Tools
+* Bazelisk
* CUDA
-Build configuration
+Build steps
-------------------
-* Open Microsoft Visual Studio
-* Open Torch-TensorRT source code folder
-* Open Manage configurations -> Edit JSON to open CMakeSettings.json file.
-* Configure the CMake build configurations. Following is an example configuration:
-
-.. code-block:: none
-
- {
- "configurations": [
- {
- "name": "x64-Debug",
- "generator": "Ninja",
- "configurationType": "Debug",
- "inheritEnvironments": [ "msvc_x64_x64" ],
- "buildRoot": "${projectDir}\\out\\build\\${name}",
- "installRoot": "${projectDir}\\out\\install\\${name}",
- "cmakeCommandArgs": "-S . -B out",
- "buildCommandArgs": "cmake --build out",
- "ctestCommandArgs": "",
- "variables": [
- {
- "name": "CMAKE_MODULE_PATH",
- "value": "$PWD\cmake\Modules",
- "type": "FILEPATH"
- },
- {
- "name": "Torch_DIR",
- "value": "\share\cmake\Torch",
- "type": "FILEPATH"
- },
- {
- "name": "TensorRT_ROOT",
- "value": "",
- "type": "FILEPATH"
- },
- {
- "name": "CMAKE_BUILD_TYPE",
- "value": "Release",
- "type": " STRING"
- }
- ]
- }
- ]
- }
-
-
-Compilation
------------
-
-* Click Build -> Build All or directly press Ctrl + Shift + B
-
-Note: After successful compilation, the build artifacts will be present at buildRoot path configured.
-
-Installation
-------------
-
-* Build -> Install Torch-TensorRT
-
-Note: After successful installation, the artifacts will be present at installRoot.
-
-
-Building With Visual Studio Code
-==================================
-
-1. Install Visual Studio Code
-2. Install Build Tools for Visual Studio 2022
-
- - Select "Desktop Development with C++"
- > Currently, this installs MSVC v143 - 2022. There are also options to install previous 2019/2017/2015 editions of MSVC
- > License term "1b Build Tools additional use right" allows using Build Tools to compile Open Source Dependencies
- > Also allows using Build Tools to develop and test Open Source Dependencies, to the minor extend of ensuring compatibility with Build Tools
-
-3. Install CUDA (e.g. 11.7.1)
-
-4. Install `TensorRT` (e.g 8.5.1.7)
-
- - Set ``TensorRT_ROOT``
- - Add ``TensorRT_ROOT\lib`` to ``PATH``
-
-5. Install "libtorch-win-shared-with-deps-latest.zip"
-
- - Select build targeting the appropriate CUDA version
- - Set ``Torch_DIR``
- - Add ``Torch_DIR\lib`` to ``PATH``
-
-6. Clone TensorRT repo
-7. Install C++ and CMake Tools extensions from MS
-
- - Change build to ``RelWithDebInfo``
-
-8. Update ``.vscode\settings.json``
-
- - Clean, configure, build
-
-e.g. /.vscode/settings.json
+* Open the app "x64 Native Tools Command Prompt for VS 2022" - note that Admin privileges may be necessary
+* Ensure Bazelisk (the Bazel launcher) is installed on your machine and available from the command line. Package managers such as Chocolatey can be used to install Bazelisk
+* Install the latest nightly version of Torch (e.g. with `pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu124`)
+* Clone the Torch-TensorRT repository and navigate to its root directory
+* Run `pip install ninja wheel setuptools`
+* Run `pip install --pre -r py/requirements.txt`
+* Run `set DISTUTILS_USE_SDK=1`
+* Run `python setup.py bdist_wheel`
+* Run `pip install dist/*.whl`
+
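+As a quick reference, a condensed sketch of the sequence above, run from an "x64 Native Tools Command Prompt for VS 2022" session (this assumes Bazelisk is already installed and on PATH, and uses the upstream repository URL):
+
+.. code-block:: bat
+
+    pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu124
+    git clone https://github.com/pytorch/TensorRT.git
+    cd TensorRT
+    pip install ninja wheel setuptools
+    pip install --pre -r py/requirements.txt
+    set DISTUTILS_USE_SDK=1
+    python setup.py bdist_wheel
+    pip install dist/*.whl
+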
+Advanced setup and Troubleshooting
+----------------------------------
+In the `WORKSPACE` file, the `cuda_win`, `libtorch_win`, and `tensorrt_win` repositories are Windows-specific modules which can be customized. For instance, if you would like to build with a different version of CUDA, or your CUDA installation is in a non-standard location, update the `path` in the `cuda_win` module.
-.. code-block:: json
+Similarly, if you would like to use a different version of PyTorch or TensorRT, customize the `urls` in the `libtorch_win` and `tensorrt_win` modules, respectively, as shown in the example at the end of this section.
- {
- "cmake.generator": "Ninja",
- "cmake.configureSettings": {
- "CMAKE_MODULE_PATH": {
- "type": "FILEPATH",
- "value": "$PWD\\cmake\\Modules"
- },
- "CMAKE_CXX_FLAGS": {
- "type": "STRING",
- "value": "-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING"
- },
- "Torch_DIR": {
- "type": "FILEPATH",
- "value": "X:\\libtorch\\share\\cmake\\Torch"
- },
- "TensorRT_ROOT": {
- "type": "FILEPATH",
- "value": "X:\\path\\to\\tensorrt"
- },
- "CMAKE_CUDA_FLAGS": "-allow-unsupported-compiler"
- },
- "cmake.buildDirectory": "${workspaceFolder}/torch_tensorrt_build"
- }
+Local versions of these packages can also be used. See `toolchains\ci_workspaces\WORKSPACE.win.release.tmpl` for an example of using a local version of TensorRT on Windows.
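+
+For example, a hypothetical `WORKSPACE` override that points `cuda_win` at a non-default install location and pins `libtorch_win` to a CUDA 12.4 nightly might look like the sketch below (the local path is illustrative; the URL matches the default used in this repository's `WORKSPACE`):
+
+.. code-block:: python
+
+    # Local CUDA install (adjust the path to match your machine)
+    new_local_repository(
+        name = "cuda_win",
+        build_file = "@//third_party/cuda:BUILD",
+        path = "D:/CUDA/v12.4/",
+    )
+
+    # LibTorch nightly built against CUDA 12.4
+    http_archive(
+        name = "libtorch_win",
+        build_file = "@//third_party/libtorch:BUILD",
+        strip_prefix = "libtorch",
+        urls = ["https://download.pytorch.org/libtorch/nightly/cu124/libtorch-win-shared-with-deps-latest.zip"],
+    )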
diff --git a/docsrc/getting_started/installation.rst b/docsrc/getting_started/installation.rst
index a70bc7838e..379756c347 100644
--- a/docsrc/getting_started/installation.rst
+++ b/docsrc/getting_started/installation.rst
@@ -44,7 +44,7 @@ Torch-TensorRT distributed nightlies targeting the PyTorch nightly. These can be
.. code-block:: sh
- python -m pip install --pre torch torch-tensorrt tensorrt --extra-index-url https://download.pytorch.org/whl/nightly/cu121
+ python -m pip install --pre torch torch-tensorrt tensorrt --extra-index-url https://download.pytorch.org/whl/nightly/cu124
@@ -126,14 +126,14 @@ Once the WORKSPACE has been configured properly, all that is required to build t
.. code-block:: sh
- python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu121
+ python -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu124
To build the wheel file
.. code-block:: sh
- python -m pip wheel --no-deps --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -w dist
+ python -m pip wheel --no-deps --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu124 -w dist
Building the C++ Library (TorchScript Only)
diff --git a/examples/int8/ptq/BUILD b/examples/int8/ptq/BUILD
index c4e9d8999a..c9fa200220 100644
--- a/examples/int8/ptq/BUILD
+++ b/examples/int8/ptq/BUILD
@@ -19,6 +19,8 @@ cc_binary(
"//examples/int8/datasets:cifar10",
"@libtorch",
"@libtorch//:caffe2",
- "@tensorrt//:nvinfer",
- ],
+ ] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ })
)
diff --git a/examples/int8/qat/BUILD b/examples/int8/qat/BUILD
index 7c32e0b14b..e97398d932 100644
--- a/examples/int8/qat/BUILD
+++ b/examples/int8/qat/BUILD
@@ -19,6 +19,8 @@ cc_binary(
"//examples/int8/datasets:cifar10",
"@libtorch",
"@libtorch//:caffe2",
- "@tensorrt//:nvinfer",
- ],
+ ] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ })
)
diff --git a/examples/torchtrt_runtime_example/BUILD b/examples/torchtrt_runtime_example/BUILD
index 1765f2bae8..957caedd23 100644
--- a/examples/torchtrt_runtime_example/BUILD
+++ b/examples/torchtrt_runtime_example/BUILD
@@ -11,6 +11,8 @@ cc_binary(
"//core/runtime",
"@libtorch",
"@libtorch//:caffe2",
- "@tensorrt//:nvinfer",
- ],
+ ] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ })
)
diff --git a/py/ci/Dockerfile.ci b/py/ci/Dockerfile.ci
index 28aed1c670..16495de77d 100644
--- a/py/ci/Dockerfile.ci
+++ b/py/ci/Dockerfile.ci
@@ -1,4 +1,4 @@
-FROM pytorch/manylinux-builder:cuda12.1
+FROM pytorch/manylinux-builder:cuda12.4
RUN yum install -y ninja-build
@@ -17,4 +17,4 @@ RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazeli
RUN mkdir /workspace
-WORKDIR /workspace
\ No newline at end of file
+WORKDIR /workspace
diff --git a/py/ci/build_whl.sh b/py/ci/build_whl.sh
index 2303efb83a..d8f91da686 100755
--- a/py/ci/build_whl.sh
+++ b/py/ci/build_whl.sh
@@ -3,15 +3,15 @@
# Example usage: docker run -it -v$(pwd):/workspace/TensorRT build_torch_tensorrt_wheel /bin/bash /workspace/TensorRT/py/ci/build_whl.sh
export CXX=g++
-export CUDA_HOME=/usr/local/cuda-12.1
+export CUDA_HOME=/usr/local/cuda-12.4
export PROJECT_DIR=/workspace/TensorRT
rm -rf /usr/local/cuda
-if [[ $CUDA_HOME == "/usr/local/cuda-12.1" ]]; then
+if [[ $CUDA_HOME == "/usr/local/cuda-12.4" ]]; then
cp -r /usr/local/cuda-11.8 /usr/local/cuda
cp -r /usr/local/cuda-12.0/ /usr/local/cuda/
- rsync -a /usr/local/cuda-12.1/ /usr/local/cuda/
+ rsync -a /usr/local/cuda-12.4/ /usr/local/cuda/
export CUDA_HOME=/usr/local/cuda
else
ln -s $CUDA_HOME /usr/local/cuda
diff --git a/py/requirements.txt b/py/requirements.txt
index a5829f4a50..c68bdc4d4c 100644
--- a/py/requirements.txt
+++ b/py/requirements.txt
@@ -1,8 +1,8 @@
numpy
packaging
pybind11==2.6.2
---extra-index-url https://download.pytorch.org/whl/nightly/cu121
-torch>=2.4.0.dev,<2.5.0
+--extra-index-url https://download.pytorch.org/whl/nightly/cu124
+torch>=2.5.0.dev,<2.6.0
torchvision>=0.19.0.dev,<0.20.0
--extra-index-url https://pypi.ngc.nvidia.com
pyyaml
diff --git a/py/torch_tensorrt/_C.cp310-win_amd64.pyd b/py/torch_tensorrt/_C.cp310-win_amd64.pyd
new file mode 100644
index 0000000000..2be0d89e54
Binary files /dev/null and b/py/torch_tensorrt/_C.cp310-win_amd64.pyd differ
diff --git a/py/torch_tensorrt/__init__.py b/py/torch_tensorrt/__init__.py
index d12c32f8b1..442afb0bc0 100644
--- a/py/torch_tensorrt/__init__.py
+++ b/py/torch_tensorrt/__init__.py
@@ -56,7 +56,7 @@ def _find_lib(name: str, paths: List[str]) -> str:
ctypes.CDLL(_find_lib(lib, WIN_PATHS))
elif sys.platform.startswith("linux"):
- LINUX_PATHS = ["/usr/local/cuda-12.1/lib64", "/usr/lib", "/usr/lib64"]
+ LINUX_PATHS = ["/usr/local/cuda-12.4/lib64", "/usr/lib", "/usr/lib64"]
if "LD_LIBRARY_PATH" in os.environ:
LINUX_PATHS += os.environ["LD_LIBRARY_PATH"].split(os.path.pathsep)
diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
index c46eb6ea35..f1e9ff23ea 100644
--- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
+++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
@@ -199,12 +199,10 @@ def aten_ops_native_group_norm(
@dynamo_tensorrt_converter(
torch.ops.aten.group_norm.default,
- capability_validator=one_user_validator,
supports_dynamic_shapes=True,
)
@dynamo_tensorrt_converter(
torch.ops.aten.group_norm,
- capability_validator=one_user_validator,
supports_dynamic_shapes=True,
)
@enforce_tensor_types(
diff --git a/pyproject.toml b/pyproject.toml
index e8e16d5f3d..f9e1f27690 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,7 +9,7 @@ requires = [
"typing-extensions>=4.7.0",
"future>=0.18.3",
"tensorrt==10.0.1",
- "torch >=2.4.0.dev,<2.5.0",
+ "torch >=2.5.0.dev,<2.6.0",
"pybind11==2.6.2",
"numpy",
]
@@ -41,7 +41,7 @@ readme = {file = "py/README.md", content-type = "text/markdown"}
requires-python = ">=3.8"
keywords = ["pytorch", "torch", "tensorrt", "trt", "ai", "artificial intelligence", "ml", "machine learning", "dl", "deep learning", "compiler", "dynamo", "torchscript", "inference"]
dependencies = [
- "torch >=2.4.0.dev,<2.5.0",
+ "torch >=2.5.0.dev,<2.6.0",
"tensorrt==10.0.1",
"tensorrt-cu12_bindings==10.0.1",
"tensorrt-cu12_libs==10.0.1",
diff --git a/setup.py b/setup.py
index d1ec6c2f0b..c74fb9e130 100644
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,7 @@ def load_dep_info():
dir_path = os.path.join(str(get_root_dir()), "py")
-CXX11_ABI = False
+CXX11_ABI = IS_WINDOWS
JETPACK_VERSION = None
PY_ONLY = False
NO_TS = False
@@ -197,6 +197,8 @@ def build_libtorchtrt_pre_cxx11_abi(
if IS_WINDOWS:
cmd.append("--config=windows")
+ else:
+ cmd.append("--config=default")
if JETPACK_VERSION == "4.5":
cmd.append("--platforms=//toolchains:jetpack_4.5")
@@ -493,16 +495,30 @@ def run(self):
"/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu",
],
libraries=["torchtrt"],
- include_dirs=[
- dir_path + "torch_tensorrt/csrc",
- dir_path + "torch_tensorrt/include",
- dir_path + "/../bazel-TRTorch/external/tensorrt/include",
- dir_path + "/../bazel-Torch-TensorRT/external/tensorrt/include",
- dir_path + "/../bazel-TensorRT/external/tensorrt/include",
- dir_path + "/../bazel-tensorrt/external/tensorrt/include",
- dir_path + "/../",
- "/usr/local/cuda",
- ],
+ include_dirs=(
+ [
+ dir_path + "torch_tensorrt/csrc",
+ dir_path + "torch_tensorrt/include",
+ dir_path + "/../",
+ "/usr/local/cuda",
+ ]
+ + (
+ [
+ dir_path + "/../bazel-TRTorch/external/tensorrt_win/include",
+ dir_path
+ + "/../bazel-Torch-TensorRT/external/tensorrt_win/include",
+ dir_path + "/../bazel-TensorRT/external/tensorrt_win/include",
+ dir_path + "/../bazel-tensorrt/external/tensorrt_win/include",
+ ]
+ if IS_WINDOWS
+ else [
+ dir_path + "/../bazel-TRTorch/external/tensorrt/include",
+ dir_path + "/../bazel-Torch-TensorRT/external/tensorrt/include",
+ dir_path + "/../bazel-TensorRT/external/tensorrt/include",
+ dir_path + "/../bazel-tensorrt/external/tensorrt/include",
+ ]
+ )
+ ),
extra_compile_args=(
[
f'/DPYBIND11_BUILD_ABI=\\"{torch._C._PYBIND11_BUILD_ABI}\\"',
diff --git a/tests/core/BUILD b/tests/core/BUILD
index 19dedd4fe5..cc18d03ea7 100644
--- a/tests/core/BUILD
+++ b/tests/core/BUILD
@@ -7,6 +7,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
filegroup(
name = "jit_models",
srcs = ["//tests/modules:mobilenet_v2_scripted.jit.pt"],
@@ -22,6 +29,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/tests/core/conversion/converters/converter_test.bzl b/tests/core/conversion/converters/converter_test.bzl
index eecabca692..b17ae8e862 100644
--- a/tests/core/conversion/converters/converter_test.bzl
+++ b/tests/core/conversion/converters/converter_test.bzl
@@ -19,6 +19,7 @@ def converter_test(name, visibility = None):
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
diff --git a/tests/core/conversion/evaluators/evaluator_test.bzl b/tests/core/conversion/evaluators/evaluator_test.bzl
index 04ef178786..6dd5f41040 100644
--- a/tests/core/conversion/evaluators/evaluator_test.bzl
+++ b/tests/core/conversion/evaluators/evaluator_test.bzl
@@ -19,6 +19,7 @@ def evaluator_test(name, visibility = None):
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
diff --git a/tests/core/lowering/BUILD b/tests/core/lowering/BUILD
index 658d66fc46..b2d3609ccf 100644
--- a/tests/core/lowering/BUILD
+++ b/tests/core/lowering/BUILD
@@ -8,6 +8,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
lowering_test(
name = "test_linear_to_addmm",
)
@@ -22,6 +29,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/tests/core/lowering/lowering_test.bzl b/tests/core/lowering/lowering_test.bzl
index 6ae8e09395..ecdac73778 100644
--- a/tests/core/lowering/lowering_test.bzl
+++ b/tests/core/lowering/lowering_test.bzl
@@ -19,6 +19,7 @@ def lowering_test(name, visibility = None):
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
diff --git a/tests/core/partitioning/BUILD b/tests/core/partitioning/BUILD
index 974a9b0e4d..ed2328897c 100644
--- a/tests/core/partitioning/BUILD
+++ b/tests/core/partitioning/BUILD
@@ -8,6 +8,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
filegroup(
name = "jit_models",
srcs = [
@@ -54,6 +61,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -69,6 +77,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -84,6 +93,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -99,6 +109,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/tests/core/partitioning/partitioning_test.bzl b/tests/core/partitioning/partitioning_test.bzl
index fb6dd50991..322721aa93 100644
--- a/tests/core/partitioning/partitioning_test.bzl
+++ b/tests/core/partitioning/partitioning_test.bzl
@@ -19,6 +19,7 @@ def partitioning_test(name, visibility = None):
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
diff --git a/tests/core/runtime/runtime_test.bzl b/tests/core/runtime/runtime_test.bzl
index 2c4753cc9b..021321c40b 100644
--- a/tests/core/runtime/runtime_test.bzl
+++ b/tests/core/runtime/runtime_test.bzl
@@ -19,6 +19,7 @@ def runtime_test(name, visibility = None):
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch//:libtorch"],
}),
diff --git a/tests/cpp/BUILD b/tests/cpp/BUILD
index 783bf2895e..2917f57130 100644
--- a/tests/cpp/BUILD
+++ b/tests/cpp/BUILD
@@ -9,6 +9,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
test_suite(
name = "api_tests",
tests = [
@@ -86,6 +93,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -124,6 +132,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -139,6 +148,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -151,6 +161,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -166,6 +177,7 @@ cc_test(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
@@ -201,6 +213,7 @@ cc_library(
"//tests/util",
"@googletest//:gtest_main",
] + select({
+ ":windows": ["@libtorch_win//:libtorch"],
":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
diff --git a/tests/util/BUILD b/tests/util/BUILD
index 87b7da9cda..c49b88acf3 100644
--- a/tests/util/BUILD
+++ b/tests/util/BUILD
@@ -9,6 +9,13 @@ config_setting(
},
)
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
config_setting(
name = "ci_build_testing",
values = {
@@ -30,8 +37,14 @@ cc_library(
],
deps = [
"@googletest//:gtest_main",
- "@tensorrt//:nvinfer",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": [
+ "@libtorch_win//:caffe2",
+ "@libtorch_win//:libtorch",
+ ],
":use_pre_cxx11_abi": [
"@libtorch_pre_cxx11_abi//:caffe2",
"@libtorch_pre_cxx11_abi//:libtorch",
diff --git a/third_party/tensorrt/archive/BUILD b/third_party/tensorrt/archive/BUILD
index 5c07794a20..c9b32dc23a 100644
--- a/third_party/tensorrt/archive/BUILD
+++ b/third_party/tensorrt/archive/BUILD
@@ -34,7 +34,19 @@ cc_library(
cc_import(
name = "nvinfer_lib",
- shared_library = "lib/libnvinfer.so",
+ shared_library = select({
+ ":windows": "lib/nvinfer_10.dll",
+ "//conditions:default": "lib/libnvinfer.so",
+ }),
+ visibility = ["//visibility:private"],
+)
+
+cc_import(
+ name = "nvinfer_static_lib",
+ static_library = select({
+ ":windows": "lib/nvinfer_10.lib",
+ "//conditions:default": "lib/libnvinfer_static.a",
+ }),
visibility = ["//visibility:private"],
)
@@ -44,15 +56,20 @@ cc_library(
deps = [
"nvinfer_headers",
"nvinfer_lib",
- "@cuda//:cudart",
- ],
+ ] + select({
+ ":windows": ["@cuda_win//:cudart", "nvinfer_static_lib"],
+ "//conditions:default": ["@cuda//:cudart"],
+ }),
)
####################################################################################
cc_import(
name = "nvparsers_lib",
- shared_library = "lib/libnvparsers.so",
+ shared_library = select({
+ ":windows": "lib/nvparsers.dll",
+ "//conditions:default": "lib/libnvparsers.so",
+ }),
visibility = ["//visibility:private"],
)
@@ -83,7 +100,10 @@ cc_library(
cc_import(
name = "nvonnxparser_lib",
- shared_library = "lib/libnvonnxparser.so",
+ shared_library = select({
+ ":windows": "lib/nvonnxparser*.dll",
+ "//conditions:default": "lib/libnvonnxparser*.so",
+ }),
visibility = ["//visibility:private"],
)
@@ -112,7 +132,10 @@ cc_library(
cc_import(
name = "nvonnxparser_runtime_lib",
- shared_library = "lib/libnvonnxparser_runtime.so",
+ shared_library = select({
+ ":windows": "lib/nvonnxparser_runtime.dll",
+ "//conditions:default": "lib/libnvonnxparser_runtime.so",
+ }),
visibility = ["//visibility:public"],
)
@@ -137,7 +160,10 @@ cc_library(
cc_import(
name = "nvcaffeparser_lib",
- shared_library = "lib/libnvcaffe_parsers.so",
+ shared_library = select({
+ ":windows": "lib/nvcaffe_parsers.dll",
+ "//conditions:default": "lib/libnvcaffe_parsers.so",
+ }),
visibility = ["//visibility:private"],
)
@@ -160,26 +186,21 @@ cc_library(
####################################################################################
-cc_import(
- name = "nvinferplugin_lib",
- shared_library = "lib/libnvinfer_plugin.so",
- visibility = ["//visibility:private"],
-)
-
cc_library(
- name = "nvinferplugin_headers",
+ name = "nvinferplugin",
+ srcs = select({
+ ":windows": ["lib/nvinfer_plugin_10.lib"],
+ "//conditions:default": ["lib/libnvinfer_plugin.so"],
+ }),
hdrs = glob(["include/NvInferPlugin*.h"]),
+ copts = ["-pthread",],
includes = ["include/"],
- visibility = ["//visibility:private"],
-)
-
-cc_library(
- name = "nvinferplugin",
- visibility = ["//visibility:public"],
+ linkopts = ["-lpthread"],
deps = [
"nvinfer",
- "nvinferplugin_headers",
- "nvinferplugin_lib",
- "@cuda//:cudart",
- ],
+ ] + select({
+ ":windows": ["@cuda_win//:cudart"],
+ "//conditions:default": ["@cuda//:cudart"],
+ }),
+ alwayslink = True,
)
diff --git a/third_party/tensorrt/local/BUILD b/third_party/tensorrt/local/BUILD
index a755ecf862..5b7bd17c85 100644
--- a/third_party/tensorrt/local/BUILD
+++ b/third_party/tensorrt/local/BUILD
@@ -103,10 +103,9 @@ cc_library(
deps = [
"nvinfer_headers",
"nvinfer_lib",
- "@cuda//:cudart",
] + select({
- ":windows": ["nvinfer_static_lib"],
- "//conditions:default": [],
+ ":windows": ["@cuda_win//:cudart", "nvinfer_static_lib"],
+ "//conditions:default": ["@cuda//:cudart"],
}),
)
@@ -359,7 +358,9 @@ cc_library(
}),
deps = [
"nvinfer",
- "@cuda//:cudart",
- ],
+ ] + select({
+ ":windows": ["@cuda_win//:cudart"],
+ "//conditions:default": ["@cuda//:cudart"],
+ }),
alwayslink = True,
)
diff --git a/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl b/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
index 40ebf12494..b505efb944 100644
--- a/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
+++ b/toolchains/ci_workspaces/WORKSPACE.win.release.tmpl
@@ -41,7 +41,7 @@ local_repository(
# CUDA should be installed on the system locally
new_local_repository(
- name = "cuda",
+ name = "cuda_win",
build_file = "@//third_party/cuda:BUILD",
path = "${CUDA_HOME}",
)
@@ -51,14 +51,7 @@ new_local_repository(
#############################################################################################################
http_archive(
- name = "libtorch",
- build_file = "@//third_party/libtorch:BUILD",
- strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
-)
-
-http_archive(
- name = "libtorch_pre_cxx11_abi",
+ name = "libtorch_win",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
@@ -69,7 +62,7 @@ http_archive(
####################################################################################
new_local_repository(
- name = "tensorrt",
+ name = "tensorrt_win",
path = "C:/TensorRT-10.0.1.6",
build_file = "@//third_party/tensorrt/local:BUILD"
)
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64 b/toolchains/ci_workspaces/WORKSPACE.x86_64
index d8a346b680..1428ec439a 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64
@@ -43,7 +43,7 @@ local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-12.1/",
+ path = "/usr/local/cuda-12.4/",
)
new_local_repository(
@@ -90,4 +90,4 @@ pip_parse(
load("@devtools_deps//:requirements.bzl", "install_deps")
-install_deps()
\ No newline at end of file
+install_deps()
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel.tmpl b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel.tmpl
index ba7f11edd9..3dcde3cf56 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel.tmpl
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel.tmpl
@@ -58,14 +58,14 @@ http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu124/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-latest.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu124/libtorch-shared-with-deps-latest.zip"],
)
http_archive(
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
index 28e1f3184c..e7bad3d30f 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
@@ -43,7 +43,7 @@ local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-12.1",
+ path = "/usr/local/cuda-12.4",
)
new_local_repository(
@@ -94,4 +94,4 @@ pip_parse(
load("@devtools_deps//:requirements.bzl", "install_deps")
-install_deps()
\ No newline at end of file
+install_deps()
diff --git a/version.txt b/version.txt
index e96f44fb3e..b8feefb940 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-2.4.0a0
+2.5.0a0