@@ -4,10 +4,10 @@ module(
44 version = "${BUILD_VERSION}"
55)
66
7- bazel_dep(name = "googletest", version = "1.14.0")
8- bazel_dep(name = "platforms", version = "0.0.10")
9- bazel_dep(name = "rules_cc", version = "0.0.9")
10- bazel_dep(name = "rules_python", version = "0.34.0")
7+ bazel_dep(name = "googletest", version = "1.16.0")
8+ bazel_dep(name = "platforms", version = "0.0.11")
9+ bazel_dep(name = "rules_cc", version = "0.1.1")
10+ bazel_dep(name = "rules_python", version = "1.3.0")
1111
1212python = use_extension("@rules_python//python/extensions:python.bzl", "python")
1313python.toolchain(
@@ -27,7 +27,7 @@ local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl"
2727# External dependency for torch_tensorrt if you already have precompiled binaries.
2828local_repository(
2929 name = "torch_tensorrt",
30- path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
30+ path = "/opt/conda/lib/python3.10/site-packages/torch_tensorrt",
3131)
3232
3333
@@ -40,6 +40,15 @@ new_local_repository(
4040 path = "${CUDA_HOME}",
4141)
4242
43+ # Server Arm (SBSA) and Jetson Jetpack (L4T) use different versions of CUDA and TensorRT
44+ # These versions can be selected using the flag `--//toolchains/dep_collection:compute_libs="jetpack"`
45+
46+ new_local_repository(
47+ name = "cuda_l4t",
48+ build_file = "@//third_party/cuda:BUILD",
49+ path = "/usr/local/cuda-12.6",
50+ )
51+
4352new_local_repository(
4453 name = "cuda_win",
4554 build_file = "@//third_party/cuda:BUILD",
@@ -53,12 +62,31 @@ http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "ht
5362# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
5463#############################################################################################################
5564
56- http_archive(
57- name = "libtorch",
58- build_file = "@//third_party/libtorch:BUILD",
59- strip_prefix = "libtorch",
60- urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
61- )
65+ # http_archive(
66+ # name = "libtorch",
67+ # build_file = "@//third_party/libtorch:BUILD",
68+ # strip_prefix = "libtorch",
69+ # urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
70+ # )
71+
72+ # http_archive(
73+ # name = "libtorch_win",
74+ # build_file = "@//third_party/libtorch:BUILD",
75+ # strip_prefix = "libtorch",
76+ # urls = ["https://download.pytorch.org/libtorch//${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
77+ # )
78+
79+
80+ # It is possible to specify a wheel file to use as the libtorch source by providing the URL below and
81+ # using the build flag `--//toolchains/dep_src:torch="whl"`
82+
83+ # http_archive(
84+ # name = "torch_whl",
85+ # build_file = "@//third_party/libtorch:BUILD",
86+ # strip_prefix = "torch",
87+ # type = "zip",
88+ # urls = ["file:///${TORCH_WHL_PATH}"],
89+ # )
6290
6391# Download these tarballs manually from the NVIDIA website
6492# Either place them in the distdir directory in third_party and use the --distdir flag
@@ -73,6 +101,24 @@ http_archive(
73101 ],
74102)
75103
104+ http_archive(
105+ name = "tensorrt_sbsa",
106+ build_file = "@//third_party/tensorrt/archive:BUILD",
107+ strip_prefix = "TensorRT-10.9.0.34",
108+ urls = [
109+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.9.0/tars/TensorRT-10.9.0.34.Linux.aarch64-gnu.cuda-12.8.tar.gz",
110+ ],
111+ )
112+
113+ http_archive(
114+ name = "tensorrt_l4t",
115+ build_file = "@//third_party/tensorrt/archive:BUILD",
116+ strip_prefix = "TensorRT-10.3.0.26",
117+ urls = [
118+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
119+ ],
120+ )
121+
76122http_archive(
77123 name = "tensorrt_win",
78124 build_file = "@//third_party/tensorrt/archive:BUILD",
@@ -95,13 +141,13 @@ http_archive(
95141# for both versions here and do not use --config=pre-cxx11-abi
96142
97143new_local_repository(
98- name = "libtorch_win",
144+ name = "libtorch",
99145 path = "${TORCH_INSTALL_PATH}",
100146 build_file = "third_party/libtorch/BUILD"
101147)
102148
103149new_local_repository(
104- name = "libtorch_pre_cxx11_abi",
150+ name = "libtorch_win",
105151 path = "${TORCH_INSTALL_PATH}",
106152 build_file = "third_party/libtorch/BUILD"
107153)
0 commit comments