diff --git a/.github/scripts/filter-matrix.py b/.github/scripts/filter-matrix.py
index bfa24ebc0e..82b062dea2 100644
--- a/.github/scripts/filter-matrix.py
+++ b/.github/scripts/filter-matrix.py
@@ -9,6 +9,13 @@
 # currently we don't support python 3.13t because tensorrt does not support 3.13t
 disabled_python_versions: List[str] = ["3.13t"]
 
+# JetPack 6.2 officially supports only Python 3.10 and cu126
+jetpack_python_versions: List[str] = ["3.10"]
+jetpack_cuda_versions: List[str] = ["cu126"]
+
+jetpack_container_image: str = "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
+sbsa_container_image: str = "quay.io/pypa/manylinux_2_34_aarch64"
+
 
 def main(args: list[str]) -> None:
     parser = argparse.ArgumentParser()
@@ -19,8 +26,23 @@ def main(args: list[str]) -> None:
         default="",
     )
 
-    options = parser.parse_args(args)
+    parser.add_argument(
+        "--jetpack",
+        help="is jetpack",
+        type=str,
+        choices=["true", "false"],
+        default="false",
+    )
 
+    parser.add_argument(
+        "--limit-pr-builds",
+        help="If it is a PR build",
+        type=str,
+        choices=["true", "false"],
+        default=os.getenv("LIMIT_PR_BUILDS", "false"),
+    )
+
+    options = parser.parse_args(args)
     if options.matrix == "":
         raise Exception("--matrix needs to be provided")
 
@@ -30,14 +52,29 @@ def main(args: list[str]) -> None:
     for item in includes:
         if item["python_version"] in disabled_python_versions:
             continue
-        if item["gpu_arch_type"] == "cuda-aarch64":
-            # pytorch image:pytorch/manylinuxaarch64-builder:cuda12.8 comes with glibc2.28
-            # however, TensorRT requires glibc2.31 on aarch64 platform
-            # TODO: in future, if pytorch supports aarch64 with glibc2.31, we should switch to use the pytorch image
-            item["container_image"] = "quay.io/pypa/manylinux_2_34_aarch64"
-            filtered_includes.append(item)
+        if options.jetpack == "true":
+            if options.limit_pr_builds == "true":
+                # for limited PR builds, the matrix passed in from test-infra is cu128 / python 3.9; override to cu126 / python 3.10 for JetPack
+                item["desired_cuda"] = "cu126"
+                item["python_version"] = "3.10"
+                item["container_image"] = jetpack_container_image
+                filtered_includes.append(item)
+            else:
+                if (
+                    item["python_version"] in jetpack_python_versions
+                    and item["desired_cuda"] in jetpack_cuda_versions
+                ):
+                    item["container_image"] = jetpack_container_image
+                    filtered_includes.append(item)
         else:
-            filtered_includes.append(item)
+            if item["gpu_arch_type"] == "cuda-aarch64":
+                # the pytorch image pytorch/manylinuxaarch64-builder:cuda12.8 ships with glibc 2.28,
+                # but TensorRT requires glibc 2.31 on the aarch64 platform
+                # TODO: if pytorch ships an aarch64 image with glibc 2.31 in the future, switch to the pytorch image
+                item["container_image"] = sbsa_container_image
+                filtered_includes.append(item)
+            else:
+                filtered_includes.append(item)
     filtered_matrix_dict = {}
     filtered_matrix_dict["include"] = filtered_includes
     print(json.dumps(filtered_matrix_dict))
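
For reference, a minimal sketch of how the filter script behaves once this change lands, assuming a matrix carrying the fields the script reads (python_version, desired_cuda, gpu_arch_type, container_image); the entry values here are illustrative:

    MATRIX='{"include":[{"python_version":"3.10","desired_cuda":"cu126","gpu_arch_type":"cuda-aarch64","container_image":"x"},{"python_version":"3.11","desired_cuda":"cu128","gpu_arch_type":"cuda-aarch64","container_image":"x"}]}'
    # with --jetpack true (and no PR limit) only the 3.10/cu126 entry should survive,
    # with container_image rewritten to nvcr.io/nvidia/l4t-jetpack:r36.4.0
    python3 .github/scripts/filter-matrix.py --matrix "${MATRIX}" --jetpack true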
diff --git a/.github/workflows/build-test-linux-aarch64-jetpack.yml b/.github/workflows/build-test-linux-aarch64-jetpack.yml
new file mode 100644
index 0000000000..d110615b61
--- /dev/null
+++ b/.github/workflows/build-test-linux-aarch64-jetpack.yml
@@ -0,0 +1,86 @@
+name: Build and test Linux aarch64 wheels for JetPack
+
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+      - nightly
+      - release/*
+    tags:
+      # NOTE: Binary build pipelines should only get triggered on release candidate builds
+      # Release candidate tags look like: v1.11.0-rc1
+      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
+  workflow_dispatch:
+
+jobs:
+  generate-matrix:
+    uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main
+    with:
+      package-type: wheel
+      os: linux-aarch64
+      test-infra-repository: pytorch/test-infra
+      test-infra-ref: main
+      with-rocm: false
+      with-cpu: false
+
+  filter-matrix:
+    needs: [generate-matrix]
+    outputs:
+      matrix: ${{ steps.filter.outputs.matrix }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+      - uses: actions/checkout@v4
+        with:
+          repository: pytorch/tensorrt
+      - name: Filter matrix
+        id: filter
+        env:
+           LIMIT_PR_BUILDS: ${{ github.event_name == 'pull_request' && !contains( github.event.pull_request.labels.*.name, 'ciflow/binaries/all') }}
+        run: |
+          set -eou pipefail
+          echo "LIMIT_PR_BUILDS=${LIMIT_PR_BUILDS}"
+          MATRIX_BLOB=${{ toJSON(needs.generate-matrix.outputs.matrix) }}
+          MATRIX_BLOB="$(python3 .github/scripts/filter-matrix.py --matrix "${MATRIX_BLOB}" --jetpack true)"
+          echo "${MATRIX_BLOB}"
+          echo "matrix=${MATRIX_BLOB}" >> "${GITHUB_OUTPUT}"
+
+  build:
+    needs: filter-matrix
+    permissions:
+      id-token: write
+      contents: read
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - repository: pytorch/tensorrt
+            pre-script: packaging/pre_build_script.sh
+            env-var-script: packaging/env_vars.txt
+            post-script: packaging/post_build_script.sh
+            smoke-test-script: packaging/smoke_test_script.sh
+            package-name: torch_tensorrt
+    name: Build torch-tensorrt whl package
+    uses: ./.github/workflows/build_wheels_linux_aarch64.yml
+    with:
+      repository: ${{ matrix.repository }}
+      ref: ""
+      test-infra-repository: pytorch/test-infra
+      test-infra-ref: main
+      build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+      pre-script: ${{ matrix.pre-script }}
+      env-var-script: ${{ matrix.env-var-script }}
+      post-script: ${{ matrix.post-script }}
+      package-name: ${{ matrix.package-name }}
+      smoke-test-script: ${{ matrix.smoke-test-script }}
+      trigger-event: ${{ github.event_name }}
+      architecture: "aarch64"
+      is-jetpack: true
+
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ inputs.repository }}-${{ github.event_name == 'workflow_dispatch' }}-${{ inputs.job-name }}
+  cancel-in-progress: true
\ No newline at end of file
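
Because the workflow also declares workflow_dispatch, it can be exercised manually; a hypothetical invocation with the GitHub CLI:

    gh workflow run build-test-linux-aarch64-jetpack.yml --ref main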
diff --git a/.github/workflows/build-test-linux-aarch64.yml b/.github/workflows/build-test-linux-aarch64.yml
index 23edecd3c7..5358cf4b26 100644
--- a/.github/workflows/build-test-linux-aarch64.yml
+++ b/.github/workflows/build-test-linux-aarch64.yml
@@ -27,7 +27,7 @@ jobs:
   filter-matrix:
     needs: [generate-matrix]
     outputs:
-      matrix: ${{ steps.generate.outputs.matrix }}
+      matrix: ${{ steps.filter.outputs.matrix }}
     runs-on: ubuntu-latest
     steps:
       - uses: actions/setup-python@v5
@@ -36,8 +36,10 @@ jobs:
       - uses: actions/checkout@v4
         with:
           repository: pytorch/tensorrt
-      - name: Generate matrix
-        id: generate
+      - name: Filter matrix
+        id: filter
+        env:
+          LIMIT_PR_BUILDS: ${{ github.event_name == 'pull_request' && !contains( github.event.pull_request.labels.*.name, 'ciflow/binaries/all') }}
         run: |
           set -eou pipefail
           MATRIX_BLOB=${{ toJSON(needs.generate-matrix.outputs.matrix) }}
diff --git a/.github/workflows/build-test-linux-x86_64.yml b/.github/workflows/build-test-linux-x86_64.yml
index 5f24a0c7ad..6f5813aae5 100644
--- a/.github/workflows/build-test-linux-x86_64.yml
+++ b/.github/workflows/build-test-linux-x86_64.yml
@@ -1,4 +1,4 @@
-name: Build and test Linux wheels
+name: Build and test Linux x86_64 wheels
 
 on:
   pull_request:
diff --git a/.github/workflows/build_wheels_linux_aarch64.yml b/.github/workflows/build_wheels_linux_aarch64.yml
index ce7a0b2f98..77eec4eea9 100644
--- a/.github/workflows/build_wheels_linux_aarch64.yml
+++ b/.github/workflows/build_wheels_linux_aarch64.yml
@@ -88,6 +88,11 @@ on:
         required: false
         default: "python -m build --wheel"
         type: string
+      is-jetpack:
+        description: Set to true if the build is for JetPack
+        required: false
+        default: false
+        type: boolean
       pip-install-torch-extra-args:
         # NOTE: Why does this exist?
         # Well setuptools / python packaging doesn't actually allow you to specify dependencies
@@ -128,7 +133,7 @@ jobs:
       UPLOAD_TO_BASE_BUCKET: ${{ matrix.upload_to_base_bucket }}
       ARCH: ${{ inputs.architecture }}
       BUILD_TARGET: ${{ inputs.build-target }}
-    name: build-${{ matrix.build_name }}
+    name: build-wheel-${{ matrix.python_version }}-${{ matrix.desired_cuda }}-${{ matrix.gpu_arch_type }}
     runs-on: ${{ matrix.validation_runner }}
     environment: ${{(inputs.trigger-event == 'schedule' || (inputs.trigger-event == 'push' && (startsWith(github.event.ref, 'refs/heads/nightly') || startsWith(github.event.ref, 'refs/tags/v')))) && 'pytorchbot-env' || ''}}
     container:
@@ -170,6 +175,11 @@ jobs:
           # when using Python version, less than the conda latest
           ###############################################################################
           echo 'Installing conda-forge'
+          if [[ ${{ inputs.is-jetpack }} == true ]]; then
+            # the JetPack base image is Ubuntu 22.04, which does not ship curl or git
+            apt-get update
+            apt-get install -y curl git
+          fi
           curl -L -o /mambaforge.sh https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-aarch64.sh
           chmod +x /mambaforge.sh
           /mambaforge.sh -b -p /opt/conda
@@ -195,12 +205,11 @@ jobs:
           python-version: ${{ env.PYTHON_VERSION }}
           cuda-version: ${{ env.CU_VERSION }}
           arch: ${{ env.ARCH }}
-
       - name: Combine Env Var and Build Env Files
         if: ${{ inputs.env-var-script != '' }}
         working-directory: ${{ inputs.repository }}
         run: |
-          set -euxo pipefail
+          set -x
           cat "${{ inputs.env-var-script }}" >> "${BUILD_ENV_FILE}"
       - name: Add XPU Env Vars in Build Env File
         if: ${{ matrix.gpu_arch_type == 'xpu' }}
@@ -211,6 +220,7 @@ jobs:
             echo "source /opt/intel/oneapi/pti/latest/env/vars.sh"
           } >> "${BUILD_ENV_FILE}"
       - name: Install torch dependency
+        if: ${{ inputs.is-jetpack == false }}
         run: |
           set -euxo pipefail
           # shellcheck disable=SC1090
@@ -241,12 +251,17 @@ jobs:
         working-directory: ${{ inputs.repository }}
         shell: bash -l {0}
         run: |
-          set -euxo pipefail
+          #set -euxo pipefail
+          set -x
           source "${BUILD_ENV_FILE}"
           export PYTORCH_VERSION="$(${CONDA_RUN} pip show torch | grep ^Version: | sed 's/Version: *//' | sed 's/+.\+//')"
           ${CONDA_RUN} python setup.py clean
           echo "Successfully ran `python setup.py clean`"
-          ${CONDA_RUN} python setup.py bdist_wheel
+          if [[ ${{ inputs.is-jetpack }} == false ]]; then
+            ${CONDA_RUN} python setup.py bdist_wheel
+          else
+            ${CONDA_RUN} python setup.py bdist_wheel --jetpack --plat-name=linux_tegra_aarch64
+          fi
       - name: Repair Manylinux_2_28 Wheel
         shell: bash -l {0}
         env:
@@ -272,6 +287,7 @@ jobs:
           script: ${{ inputs.post-script }}
       - name: Smoke Test
         shell: bash -l {0}
+        if: ${{ inputs.is-jetpack == false }}
         env:
           PACKAGE_NAME: ${{ inputs.package-name }}
           SMOKE_TEST_SCRIPT: ${{ inputs.smoke-test-script }}
@@ -316,7 +332,8 @@ jobs:
   upload:
     needs: build
     uses: pytorch/test-infra/.github/workflows/_binary_upload.yml@main
-    if: always()
+    # only upload to the pytorch index for non-JetPack builds
+    if: ${{ inputs.is-jetpack == false }}
     with:
       repository: ${{ inputs.repository }}
       ref: ${{ inputs.ref }}
diff --git a/MODULE.bazel b/MODULE.bazel
index 008c7f53fc..5d15a52152 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -87,6 +87,15 @@ http_archive(
     urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-win-shared-with-deps-latest.zip"],
 )
 
+http_archive(
+    name = "torch_l4t",
+    build_file = "@//third_party/libtorch:BUILD",
+    strip_prefix = "torch",
+    type = "zip",
+    urls = ["https://pypi.jetson-ai-lab.dev/jp6/cu126/+f/6ef/f643c0a7acda9/torch-2.7.0-cp310-cp310-linux_aarch64.whl"],
+    sha256 = "6eff643c0a7acda92734cc798338f733ff35c7df1a4434576f5ff7c66fc97319"
+)
+
 # Download these tarballs manually from the NVIDIA website
 # Either place them in the distdir directory in third_party and use the --distdir flag
 # or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz
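
The torch_l4t archive above is pinned by sha256; a quick out-of-band check of the wheel, using the URL and digest pinned above:

    curl -L -o torch-2.7.0-cp310-cp310-linux_aarch64.whl \
      "https://pypi.jetson-ai-lab.dev/jp6/cu126/+f/6ef/f643c0a7acda9/torch-2.7.0-cp310-cp310-linux_aarch64.whl"
    sha256sum torch-2.7.0-cp310-cp310-linux_aarch64.whl
    # expect: 6eff643c0a7acda92734cc798338f733ff35c7df1a4434576f5ff7c66fc97319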
diff --git a/core/BUILD b/core/BUILD
index 28cd460690..6f5cfad30f 100644
--- a/core/BUILD
+++ b/core/BUILD
@@ -66,6 +66,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/conversion/BUILD b/core/conversion/BUILD
index 13696550e6..ff87c5a4b8 100644
--- a/core/conversion/BUILD
+++ b/core/conversion/BUILD
@@ -61,6 +61,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/conversion/conversionctx/BUILD b/core/conversion/conversionctx/BUILD
index 89ff7f613c..b6820fc757 100644
--- a/core/conversion/conversionctx/BUILD
+++ b/core/conversion/conversionctx/BUILD
@@ -56,6 +56,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/conversion/converters/BUILD b/core/conversion/converters/BUILD
index 9571d91604..456b8ee7d4 100644
--- a/core/conversion/converters/BUILD
+++ b/core/conversion/converters/BUILD
@@ -56,6 +56,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
@@ -81,6 +82,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
@@ -143,6 +145,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/conversion/evaluators/BUILD b/core/conversion/evaluators/BUILD
index 172e8f6670..d3adad10cd 100644
--- a/core/conversion/evaluators/BUILD
+++ b/core/conversion/evaluators/BUILD
@@ -62,6 +62,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/conversion/tensorcontainer/BUILD b/core/conversion/tensorcontainer/BUILD
index c812b74a9f..951a0b886e 100644
--- a/core/conversion/tensorcontainer/BUILD
+++ b/core/conversion/tensorcontainer/BUILD
@@ -55,6 +55,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/conversion/var/BUILD b/core/conversion/var/BUILD
index 65d9583474..770d3c2120 100644
--- a/core/conversion/var/BUILD
+++ b/core/conversion/var/BUILD
@@ -58,6 +58,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/ir/BUILD b/core/ir/BUILD
index d522a6a018..fce3fbe51f 100644
--- a/core/ir/BUILD
+++ b/core/ir/BUILD
@@ -58,6 +58,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/lowering/BUILD b/core/lowering/BUILD
index e9b1e1ae07..27af435927 100644
--- a/core/lowering/BUILD
+++ b/core/lowering/BUILD
@@ -60,6 +60,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/lowering/passes/BUILD b/core/lowering/passes/BUILD
index 42dc4faab5..845abdb62a 100644
--- a/core/lowering/passes/BUILD
+++ b/core/lowering/passes/BUILD
@@ -78,6 +78,7 @@ cc_library(
     ] + select({
         ":use_torch_whl": ["@torch_whl//:libtorch"],
         ":windows": ["@libtorch_win//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/partitioning/BUILD b/core/partitioning/BUILD
index 2cbcec34b1..378752cdfd 100644
--- a/core/partitioning/BUILD
+++ b/core/partitioning/BUILD
@@ -63,6 +63,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/partitioning/partitioningctx/BUILD b/core/partitioning/partitioningctx/BUILD
index 011a48c6be..bd21aba7ff 100644
--- a/core/partitioning/partitioningctx/BUILD
+++ b/core/partitioning/partitioningctx/BUILD
@@ -59,6 +59,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/partitioning/partitioninginfo/BUILD b/core/partitioning/partitioninginfo/BUILD
index d2a86a2688..daebcd615f 100644
--- a/core/partitioning/partitioninginfo/BUILD
+++ b/core/partitioning/partitioninginfo/BUILD
@@ -58,6 +58,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/partitioning/segmentedblock/BUILD b/core/partitioning/segmentedblock/BUILD
index c463d17b92..83e45eaf14 100644
--- a/core/partitioning/segmentedblock/BUILD
+++ b/core/partitioning/segmentedblock/BUILD
@@ -58,6 +58,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/plugins/BUILD b/core/plugins/BUILD
index ba167d5f2a..cebce31941 100644
--- a/core/plugins/BUILD
+++ b/core/plugins/BUILD
@@ -77,6 +77,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/runtime/BUILD b/core/runtime/BUILD
index f30519619b..72c670bff1 100644
--- a/core/runtime/BUILD
+++ b/core/runtime/BUILD
@@ -70,6 +70,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/util/BUILD b/core/util/BUILD
index bc9b53ec8d..4f522704ee 100644
--- a/core/util/BUILD
+++ b/core/util/BUILD
@@ -62,6 +62,7 @@ cc_library(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -96,6 +97,7 @@ cc_library(
     deps = select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -119,6 +121,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/core/util/logging/BUILD b/core/util/logging/BUILD
index d29568cf97..f0cc067af9 100644
--- a/core/util/logging/BUILD
+++ b/core/util/logging/BUILD
@@ -53,6 +53,7 @@ cc_library(
     }) + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
     alwayslink = True,
diff --git a/cpp/bin/torchtrtc/BUILD b/cpp/bin/torchtrtc/BUILD
index 51ee4ca2ab..d858d4de93 100644
--- a/cpp/bin/torchtrtc/BUILD
+++ b/cpp/bin/torchtrtc/BUILD
@@ -9,6 +9,15 @@ config_setting(
     },
 )
 
+config_setting(
+    name = "jetpack",
+    constraint_values = [
+        "@platforms//cpu:aarch64",
+    ],
+    flag_values = {
+        "//toolchains/dep_collection:compute_libs": "jetpack"
+    },
+)
 config_setting(
     name = "windows",
     constraint_values = [
@@ -43,6 +52,10 @@ cc_binary(
             "@torch_whl//:caffe2",
             "@torch_whl//:libtorch"
         ],
+        ":jetpack": [
+            "@torch_l4t//:caffe2",
+            "@torch_l4t//:libtorch"
+        ],
         "//conditions:default": [
             "@libtorch",
             "@libtorch//:caffe2",
diff --git a/docsrc/getting_started/jetpack.rst b/docsrc/getting_started/jetpack.rst
index ddbf89dc63..edfe1ae52e 100644
--- a/docsrc/getting_started/jetpack.rst
+++ b/docsrc/getting_started/jetpack.rst
@@ -1,119 +1,122 @@
-.. _Torch_TensorRT_in_JetPack_6.1
+.. _Torch_TensorRT_in_JetPack:
 
-Overview
-##################
-
-JetPack 6.1
----------------------
-Nvida JetPack 6.1 is the latest production release ofJetPack 6.
-With this release it incorporates:
-CUDA 12.6
-TensorRT 10.3
-cuDNN 9.3
-DLFW 24.09
+Torch-TensorRT in JetPack
+#############################
 
-You can find more details for the JetPack 6.1:
+Overview
+********
 
-    * https://docs.nvidia.com/jetson/jetpack/release-notes/index.html
-    * https://docs.nvidia.com/deeplearning/frameworks/install-pytorch-jetson-platform/index.html
+JetPack 6.2
+===========
+NVIDIA JetPack 6.2 is the latest production release for Jetson platforms, featuring:
+
+- CUDA 12.6
+- TensorRT 10.3
+- cuDNN 9.3
 
+For detailed information about JetPack 6.2, refer to:
+
+* `JetPack 6.2 Release Notes <https://docs.nvidia.com/jetson/jetpack/release-notes/index.html>`_
+* `PyTorch for Jetson Platform <https://docs.nvidia.com/deeplearning/frameworks/install-pytorch-jetson-platform/index.html>`_
 
 Prerequisites
-~~~~~~~~~~~~~~
+*************
 
+System Preparation
+==================
+1. **Flash your Jetson device** with JetPack 6.2 using SDK Manager:
 
-Ensure your jetson developer kit has been flashed with the latest JetPack 6.1. You can find more details on how to flash Jetson board via sdk-manager:
+   `SDK Manager Guide <https://developer.nvidia.com/sdk-manager>`_
 
-    * https://developer.nvidia.com/sdk-manager
+2. **Verify JetPack installation**:
 
+   .. code-block:: sh
 
-check the current jetpack version using
+      apt show nvidia-jetpack
 
-.. code-block:: sh
+3. **Install development components**:
+
+   .. code-block:: sh
 
-    apt show nvidia-jetpack
+      sudo apt-get update
+      sudo apt-get install nvidia-jetpack
 
-Ensure you have installed JetPack Dev components. This step is required if you need to build on jetson board.
+4. **Confirm CUDA 12.6 installation**:
 
-You can only install the dev components that you require: ex, tensorrt-dev would be the meta-package for all TRT development or install everthing.
+   .. code-block:: sh
 
-.. code-block:: sh
-    # install all the nvidia-jetpack dev components
-    sudo apt-get update
-    sudo apt-get install nvidia-jetpack
+      nvcc --version
+      # If missing or incorrect version:
+      sudo apt-get install cuda-toolkit-12-6
 
-Ensure you have cuda 12.6 installed(this should be installed automatically from nvidia-jetpack)
+5. **Validate cuSPARSELt library**:
 
-.. code-block:: sh
+   .. code-block:: sh
 
-    # check the cuda version
-    nvcc --version
-    # if not installed or the version is not 12.6, install via the below cmd:
-    sudo apt-get update
-    sudo apt-get install cuda-toolkit-12-6
+      # Check library presence
+      ls /usr/local/cuda/lib64/libcusparseLt.so
 
-Ensure libcusparseLt.so exists at /usr/local/cuda/lib64/:
+      # Install if missing
+      wget https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
+      tar xf libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
+      sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/include/* /usr/local/cuda/include/
+      sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/
 
-.. code-block:: sh
+Building Torch-TensorRT
+***********************
 
-    # if not exist, download and copy to the directory
-    wget https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
-    tar xf libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz
-    sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/include/* /usr/local/cuda/include/
-    sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/
+Build Environment Setup
+=======================
+1. **Install Build Dependencies**:
 
+   .. code-block:: sh
 
-Build torch_tensorrt
-~~~~~~~~~~~~~~
+      wget https://github.com/bazelbuild/bazelisk/releases/download/v1.26.0/bazelisk-linux-arm64
+      sudo mv bazelisk-linux-arm64 /usr/bin/bazel
+      sudo chmod +x /usr/bin/bazel
 
+   .. code-block:: sh
 
-Install bazel
+      apt-get install ninja-build vim libopenblas-dev git
 
-.. code-block:: sh
+2. **Install Python dependencies**:
 
-    wget -v https://github.com/bazelbuild/bazelisk/releases/download/v1.20.0/bazelisk-linux-arm64
-    sudo mv bazelisk-linux-arm64 /usr/bin/bazel
-    chmod +x /usr/bin/bazel
+   .. code-block:: sh
 
-Install pip and required python packages:
-    * https://pip.pypa.io/en/stable/installation/
+      wget https://bootstrap.pypa.io/get-pip.py
+      python get-pip.py
+      python -m pip install pyyaml
 
-.. code-block:: sh
+3. **Install PyTorch**:
 
-    # install pip
-    wget https://bootstrap.pypa.io/get-pip.py
-    python get-pip.py
+   .. code-block:: sh
 
-.. code-block:: sh
-
-   # install pytorch from nvidia jetson distribution: https://developer.download.nvidia.com/compute/redist/jp/v61/pytorch
-   python -m pip install torch https://developer.download.nvidia.com/compute/redist/jp/v61/pytorch/torch-2.5.0a0+872d972e41.nv24.08.17622132-cp310-cp310-linux_aarch64.whl
-
-.. code-block:: sh
+      # install the torch and torchvision wheels from the jetson-ai-lab index; they are built specifically for JetPack 6.2
+      python -m pip install torch==2.7.0 torchvision==0.22.0 --index-url=https://pypi.jetson-ai-lab.dev/jp6/cu126/
 
-    # install required python packages
-    python -m pip install -r toolchains/jp_workspaces/requirements.txt
 
-    # if you want to run the test cases, then install the test required python packages
-    python -m pip install -r toolchains/jp_workspaces/test_requirements.txt
+Building the Wheel
+==================
 
+.. code-block:: sh
+
+   python setup.py bdist_wheel
 
-Build and Install torch_tensorrt wheel file
-
+Installation
+============
 
-Since torch_tensorrt version has dependencies on torch version. torch version supported by JetPack6.1 is from DLFW 24.08/24.09(torch 2.5.0).
+.. code-block:: sh
+
+   # the wheel is written to the dist directory with the platform tag linux_tegra_aarch64
+   cd dist
+   python -m pip install torch_tensorrt-2.8.0.dev0+d8318d8fc-cp310-cp310-linux_tegra_aarch64.whl
 
-Please make sure to build torch_tensorrt wheel file from source release/2.5 branch
-(TODO: lanl to update the branch name once release/ngc branch is available)
+Post-Installation Verification
+==============================
 
-.. code-block:: sh
+Verify installation by importing in Python:
+
+.. code-block:: python
 
-    cuda_version=$(nvcc --version | grep Cuda | grep release | cut -d ',' -f 2 | sed -e 's/ release //g')
-    export TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(torch.__file__))")
-    export SITE_PACKAGE_PATH=${TORCH_INSTALL_PATH::-6}
-    export CUDA_HOME=/usr/local/cuda-${cuda_version}/
-    # replace the MODULE.bazel with the jetpack one
-    cat toolchains/jp_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
-    # build and install torch_tensorrt wheel file
-    python setup.py install --user
+   # verify that torch-tensorrt can be imported
+   import torch
+   import torch_tensorrt
+   print(torch_tensorrt.__version__)
 
+To verify that the examples run, execute one from a shell:
+
+.. code-block:: sh
+
+   python examples/dynamo/torch_compile_resnet_example.py
diff --git a/examples/int8/benchmark/BUILD b/examples/int8/benchmark/BUILD
index 3b464b3a99..6f07d6d453 100644
--- a/examples/int8/benchmark/BUILD
+++ b/examples/int8/benchmark/BUILD
@@ -29,6 +29,10 @@ cc_library(
             "@torch_whl//:libtorch",
             "@torch_whl//:caffe2",
         ],
+        ":jetpack": [
+            "@torch_l4t//:libtorch",
+            "@torch_l4t//:caffe2",
+        ],
         "//conditions:default": [
             "@libtorch//:libtorch",
             "@libtorch//:caffe2",
diff --git a/examples/int8/ptq/BUILD b/examples/int8/ptq/BUILD
index d30c7d3c03..c7da7d854f 100644
--- a/examples/int8/ptq/BUILD
+++ b/examples/int8/ptq/BUILD
@@ -62,6 +62,10 @@ cc_binary(
                 "@torch_whl//:libtorch",
                 "@torch_whl//:caffe2",
             ],
+            ":jetpack": [
+                "@torch_l4t//:libtorch",
+                "@torch_l4t//:caffe2",
+            ],
             "//conditions:default": [
                 "@libtorch//:libtorch",
                 "@libtorch//:caffe2",
diff --git a/examples/int8/qat/BUILD b/examples/int8/qat/BUILD
index 0aab56a02a..ab5fa07246 100644
--- a/examples/int8/qat/BUILD
+++ b/examples/int8/qat/BUILD
@@ -63,6 +63,10 @@ cc_binary(
             "@torch_whl//:libtorch",
             "@torch_whl//:caffe2",
         ],
+        ":jetpack": [
+            "@torch_l4t//:libtorch",
+            "@torch_l4t//:caffe2",
+        ],
         "//conditions:default": [
             "@libtorch//:libtorch",
             "@libtorch//:caffe2",
diff --git a/packaging/pre_build_script.sh b/packaging/pre_build_script.sh
index d6764efe89..f7cbb4919e 100755
--- a/packaging/pre_build_script.sh
+++ b/packaging/pre_build_script.sh
@@ -5,19 +5,33 @@ set -x
 # Install dependencies
 python3 -m pip install pyyaml
 
-yum install -y ninja-build gettext
+if [[ $(uname -m) == "aarch64" ]]; then
+  IS_AARCH64=true
+  BAZEL_PLATFORM=arm64
+  os_name=$(grep -w "ID" /etc/os-release | cut -d= -f2)
+  if [[ ${os_name} == "ubuntu" ]]; then
+      IS_JETPACK=true
+      apt-get update
+      apt-get install -y ninja-build gettext curl libopenblas-dev
+  else
+      IS_SBSA=true
+      yum install -y ninja-build gettext
+  fi
+else
+  BAZEL_PLATFORM="amd64"
+fi
 
-BAZEL_PLATFORM="amd64"
 
-if [[ $(uname -m) == "aarch64" ]]; then
-    BAZEL_PLATFORM=arm64
-    rm -rf /opt/openssl # Not sure whats up with the openssl mismatch
+if [[ ${IS_AARCH64} == true ]]; then
     # aarch64 does not have envsubst pre-installed in the image, install it here
     curl -L  https://github.com/a8m/envsubst/releases/download/v1.4.2/envsubst-Linux-arm64 -o envsubst \
     && mv envsubst /usr/bin/envsubst && chmod +x /usr/bin/envsubst
-    # install cuda for aarch64
-    source .github/scripts/install-cuda-aarch64.sh
-    install_cuda_aarch64
+    # install cuda for SBSA
+    if [[ ${IS_SBSA} == true ]]; then
+        rm -rf /opt/openssl # Not sure what's up with the openssl mismatch
+        source .github/scripts/install-cuda-aarch64.sh
+        install_cuda_aarch64
+    fi
 fi
 
 curl -L https://github.com/bazelbuild/bazelisk/releases/download/v1.26.0/bazelisk-linux-${BAZEL_PLATFORM} \
@@ -25,12 +39,18 @@ curl -L https://github.com/bazelbuild/bazelisk/releases/download/v1.26.0/bazelis
     && mv bazelisk-linux-${BAZEL_PLATFORM} /usr/bin/bazel \
     && chmod +x /usr/bin/bazel
 
-TORCH_TORCHVISION=$(grep "^torch" py/requirements.txt)
-INDEX_URL=https://download.pytorch.org/whl/${CHANNEL}/${CU_VERSION}
-
-# Install all the dependencies required for Torch-TensorRT
 pip uninstall -y torch torchvision
-pip install --force-reinstall --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL}
+
+if [[ ${IS_JETPACK} == true ]]; then
+    # install torch 2.7.0 / torchvision 0.22.0 for JetPack 6.2
+    pip install torch==2.7.0 torchvision==0.22.0 --index-url=https://pypi.jetson-ai-lab.dev/jp6/cu126/
+else
+    TORCH_TORCHVISION=$(grep "^torch" py/requirements.txt)
+    INDEX_URL=https://download.pytorch.org/whl/${CHANNEL}/${CU_VERSION}
+
+    # Install all the dependencies required for Torch-TensorRT
+    pip install --force-reinstall --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL}
+fi
 
 export TORCH_BUILD_NUMBER=$(python -c "import torch, urllib.parse as ul; print(ul.quote_plus(torch.__version__))")
 export TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(torch.__file__))")
@@ -54,10 +74,11 @@ if [[ "${CU_VERSION::4}" < "cu12" ]]; then
          pyproject.toml
 fi
 
+cat toolchains/ci_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
+
 if [[ ${TENSORRT_VERSION} != "" ]]; then
-  cat toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl | envsubst > MODULE.bazel
-else
-  cat toolchains/ci_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
+    sed -i -e "s/strip_prefix = \"TensorRT-.*\"/strip_prefix = \"${TENSORRT_STRIP_PREFIX}\"/g" MODULE.bazel
+    sed -i -e "s#\"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/.*\"#\"${TENSORRT_URLS}\"#g" MODULE.bazel
 fi
 
 cat MODULE.bazel
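
For illustration, the two sed edits above retarget the TensorRT pin in the rendered MODULE.bazel; with example values (the version shown is hypothetical):

    export TENSORRT_STRIP_PREFIX="TensorRT-10.9.0.34"
    export TENSORRT_URLS="https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.9.0/tars/TensorRT-10.9.0.34.Linux.x86_64-gnu.cuda-12.8.tar.gz"
    # strip_prefix = "TensorRT-..." becomes strip_prefix = "TensorRT-10.9.0.34",
    # and the developer.nvidia.com tarball URL is replaced with ${TENSORRT_URLS}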
diff --git a/packaging/pre_build_script_windows.sh b/packaging/pre_build_script_windows.sh
index 726d71970e..f73f8875aa 100644
--- a/packaging/pre_build_script_windows.sh
+++ b/packaging/pre_build_script_windows.sh
@@ -35,11 +35,11 @@ pip install --force-reinstall --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL
 export CUDA_HOME="$(echo ${CUDA_PATH} | sed -e 's#\\#\/#g')"
 export TORCH_INSTALL_PATH="$(python -c "import torch, os; print(os.path.dirname(torch.__file__))" | sed -e 's#\\#\/#g')"
 
+cat toolchains/ci_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
 
 if [[ ${TENSORRT_VERSION} != "" ]]; then
-  cat toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl | envsubst > MODULE.bazel
-else
-  cat toolchains/ci_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
+    sed -i -e "s/strip_prefix = \"TensorRT-.*\"/strip_prefix = \"${TENSORRT_STRIP_PREFIX}\"/g" MODULE.bazel
+    sed -i -e "s#\"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/.*\"#\"${TENSORRT_URLS}\"#g" MODULE.bazel
 fi
 
 cat MODULE.bazel
diff --git a/pyproject.toml b/pyproject.toml
index 3bb857e3e0..a28eb1b046 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,10 +8,13 @@ requires = [
     "cffi>=1.15.1",
     "typing-extensions>=4.7.0",
     "future>=0.18.3",
-    "tensorrt-cu12>=10.9.0,<10.10.0",
-    "torch>=2.8.0.dev,<2.9.0",
+    "tensorrt-cu12>=10.9.0,<10.10.0; 'tegra' not in platform_release",
+    "tensorrt>=10.3.0,<10.4.0; 'tegra' in platform_release",
+    "torch>=2.8.0.dev,<2.9.0; 'tegra' not in platform_release",
+    "torch>=2.7.0,<2.8.0; 'tegra' in platform_release",
     "pybind11==2.6.2",
-    "numpy",
+    "numpy; 'tegra' not in platform_release",
+    "numpy<2.0.0; 'tegra' in platform_release",
     "sympy",
 ]
 build-backend = "setuptools.build_meta"
@@ -55,13 +58,21 @@ keywords = [
     "inference",
 ]
 dependencies = [
-    "torch>=2.8.0.dev,<2.9.0",
-    "tensorrt>=10.9.0,<10.10.0",
-    "tensorrt-cu12>=10.9.0,<10.10.0",
-    "tensorrt-cu12-bindings>=10.9.0,<10.10.0",
-    "tensorrt-cu12-libs>=10.9.0,<10.10.0",
+    "torch>=2.8.0.dev,<2.9.0; 'tegra' not in platform_release",
+    "torch>=2.7.0,<2.8.0; 'tegra' in platform_release",
+
+    "tensorrt>=10.9.0,<10.10.0; 'tegra' not in platform_release",
+    "tensorrt>=10.3.0,<10.4.0; 'tegra' in platform_release",
+
+    "tensorrt-cu12>=10.9.0,<10.10.0; 'tegra' not in platform_release",
+    "tensorrt-cu12-bindings>=10.9.0,<10.10.0; 'tegra' not in platform_release",
+    "tensorrt-cu12-libs>=10.9.0,<10.10.0; 'tegra' not in platform_release",
+
     "packaging>=23",
-    "numpy",
+
+    "numpy; 'tegra' not in platform_release",
+    "numpy<2.0.0; 'tegra' in platform_release",
+
     "typing-extensions>=4.7.0",
 ]
 
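
The 'tegra' in platform_release markers above pick the Jetson-specific pins at install time; a quick sanity check of how the marker evaluates on a given machine (the release string is an example):

    uname -r   # e.g. 5.15.148-tegra on JetPack 6.x
    python3 -c "import platform; print('tegra' in platform.uname().release)"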
diff --git a/setup.py b/setup.py
index 9e386975e5..a342967a92 100644
--- a/setup.py
+++ b/setup.py
@@ -79,8 +79,8 @@ def load_dep_info():
 dir_path = os.path.join(str(get_root_dir()), "py")
 
 IS_AARCH64 = platform.uname().processor == "aarch64"
-IS_JETPACK = False
-IS_SBSA = False
+IS_JETPACK = "tegra" in platform.uname().release
+
 PY_ONLY = False
 NO_TS = False
 LEGACY = False
@@ -122,11 +122,11 @@ def load_dep_info():
 if (gpu_arch_version := os.environ.get("CU_VERSION")) is None:
     gpu_arch_version = f"cu{__cuda_version__.replace('.','')}"
 
-if (jetpack := os.environ.get("JETPACK_BUILD")) is not None:
+if IS_AARCH64 and (jetpack := os.environ.get("JETPACK_BUILD")) is not None:
     if jetpack == "1":
         IS_JETPACK = True
 
-if (sbsa := os.environ.get("SBSA_BUILD")) is not None:
+if IS_AARCH64 and (sbsa := os.environ.get("SBSA_BUILD")) is not None:
     if sbsa == "1":
         IS_SBSA = True
 
@@ -147,10 +147,28 @@ def load_dep_info():
 if IS_AARCH64:
     if "--jetpack" in sys.argv:
         sys.argv.remove("--jetpack")
+        # used to simulate a JetPack build on aarch64 machines (non-Tegra platforms)
         IS_JETPACK = True
     else:
         IS_SBSA = True
 
+IS_SBSA = IS_AARCH64 and not IS_JETPACK
+
+# tag JetPack wheels with the linux_tegra_aarch64 platform unless one was passed explicitly
+if IS_JETPACK and "bdist_wheel" in sys.argv:
+    needs_append_plat_name = True
+    for i, arg in enumerate(sys.argv):
+        if (
+            arg == "--plat-name"
+            and i + 1 < len(sys.argv)
+            and sys.argv[i + 1] == "linux_tegra_aarch64"
+        ):
+            needs_append_plat_name = False
+            break
+        if arg == "--plat-name=linux_tegra_aarch64":
+            needs_append_plat_name = False
+            break
+    if needs_append_plat_name:
+        sys.argv.append("--plat-name=linux_tegra_aarch64")
 
 BAZEL_EXE = None
 if not PY_ONLY:
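
Taken together, these setup.py changes let a JetPack wheel be produced either on a Tegra device (auto-detected from the kernel release) or by simulating the JetPack path elsewhere; a minimal sketch of the latter:

    # on a non-Tegra aarch64 host, force the JetPack path; setup.py appends
    # --plat-name=linux_tegra_aarch64 automatically if it was not given
    python setup.py bdist_wheel --jetpack
    ls dist/   # expect a *-linux_tegra_aarch64.whl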
diff --git a/tests/core/BUILD b/tests/core/BUILD
index a0e19fa232..5c0b762847 100644
--- a/tests/core/BUILD
+++ b/tests/core/BUILD
@@ -51,6 +51,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
diff --git a/tests/core/lowering/lowering_test.bzl b/tests/core/lowering/lowering_test.bzl
index 9d2c38a969..04167cbb48 100644
--- a/tests/core/lowering/lowering_test.bzl
+++ b/tests/core/lowering/lowering_test.bzl
@@ -21,6 +21,7 @@ def lowering_test(name, visibility = None):
         ] + select({
             ":windows": ["@libtorch_win//:libtorch"],
             ":use_torch_whl": ["@torch_whl//:libtorch"],
+            ":jetpack": ["@torch_l4t//:libtorch"],
             "//conditions:default": ["@libtorch"],
         }),
         timeout = "short",
diff --git a/tests/core/partitioning/BUILD b/tests/core/partitioning/BUILD
index b2f99dbd34..ee3a992eb3 100644
--- a/tests/core/partitioning/BUILD
+++ b/tests/core/partitioning/BUILD
@@ -64,6 +64,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -80,6 +81,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -96,6 +98,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -112,6 +115,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
diff --git a/tests/core/partitioning/partitioning_test.bzl b/tests/core/partitioning/partitioning_test.bzl
index f89dd22f40..5128203064 100644
--- a/tests/core/partitioning/partitioning_test.bzl
+++ b/tests/core/partitioning/partitioning_test.bzl
@@ -21,6 +21,7 @@ def partitioning_test(name, visibility = None):
         ] + select({
             ":windows": ["@libtorch_win//:libtorch"],
             ":use_torch_whl": ["@torch_whl//:libtorch"],
+            ":jetpack": ["@torch_l4t//:libtorch"],
             "//conditions:default": ["@libtorch"],
         }),
         #timeout = "short",
diff --git a/tests/core/runtime/runtime_test.bzl b/tests/core/runtime/runtime_test.bzl
index 3363edb627..acd2bf47c8 100644
--- a/tests/core/runtime/runtime_test.bzl
+++ b/tests/core/runtime/runtime_test.bzl
@@ -21,6 +21,7 @@ def runtime_test(name, visibility = None):
         ] + select({
             ":windows": ["@libtorch_win//:libtorch"],
             ":use_torch_whl": ["@torch_whl//:libtorch"],
+            ":jetpack": ["@torch_l4t//:libtorch"],
             "//conditions:default": ["@libtorch"],
         }),
     )
diff --git a/tests/cpp/BUILD b/tests/cpp/BUILD
index b50a3c6783..72b448cd0f 100644
--- a/tests/cpp/BUILD
+++ b/tests/cpp/BUILD
@@ -95,6 +95,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -134,6 +135,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -150,6 +152,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -163,6 +166,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -179,6 +183,7 @@ cc_test(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
@@ -215,6 +220,7 @@ cc_library(
     ] + select({
         ":windows": ["@libtorch_win//:libtorch"],
         ":use_torch_whl": ["@torch_whl//:libtorch"],
+        ":jetpack": ["@torch_l4t//:libtorch"],
         "//conditions:default": ["@libtorch"],
     }),
 )
diff --git a/tests/util/BUILD b/tests/util/BUILD
index 38e4b5ff6d..3b89c9073e 100644
--- a/tests/util/BUILD
+++ b/tests/util/BUILD
@@ -71,6 +71,10 @@ cc_library(
             "@torch_whl//:caffe2",
             "@torch_whl//:libtorch",
         ],
+        ":jetpack": [
+            "@torch_l4t//:libtorch",
+            "@torch_l4t//:caffe2",
+        ],
         "//conditions:default": [
             "@libtorch",
             "@libtorch//:caffe2",
diff --git a/toolchains/ci_workspaces/MODULE.bazel.tmpl b/toolchains/ci_workspaces/MODULE.bazel.tmpl
index d4587c6a64..e3888c2584 100644
--- a/toolchains/ci_workspaces/MODULE.bazel.tmpl
+++ b/toolchains/ci_workspaces/MODULE.bazel.tmpl
@@ -127,6 +127,12 @@ new_local_repository(
     build_file = "third_party/libtorch/BUILD"
 )
 
+new_local_repository(
+    name = "torch_l4t",
+    path = "${TORCH_INSTALL_PATH}",
+    build_file = "third_party/libtorch/BUILD"
+)
+
 #new_local_repository(
 #   name = "tensorrt",
 #   path = "/usr/",
diff --git a/toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl b/toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl
deleted file mode 100644
index 46c4e68a3c..0000000000
--- a/toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl
+++ /dev/null
@@ -1,102 +0,0 @@
-module(
-    name = "torch_tensorrt",
-    repo_name = "org_pytorch_tensorrt",
-    version = "${BUILD_VERSION}"
-)
-
-bazel_dep(name = "googletest", version = "1.14.0")
-bazel_dep(name = "platforms", version = "0.0.10")
-bazel_dep(name = "rules_cc", version = "0.0.9")
-bazel_dep(name = "rules_python", version = "0.34.0")
-
-python = use_extension("@rules_python//python/extensions:python.bzl", "python")
-python.toolchain(
-    ignore_root_user_error = True,
-    python_version = "3.11",
-)
-
-bazel_dep(name = "rules_pkg", version = "1.0.1")
-git_override(
-    module_name = "rules_pkg",
-    commit = "17c57f4",
-    remote = "https://github.com/narendasan/rules_pkg",
-)
-
-local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "local_repository")
-
-# External dependency for torch_tensorrt if you already have precompiled binaries.
-local_repository(
-    name = "torch_tensorrt",
-    path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
-)
-
-
-new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")
-
-# CUDA should be installed on the system locally
-new_local_repository(
-    name = "cuda",
-    build_file = "@//third_party/cuda:BUILD",
-    path = "${CUDA_HOME}",
-)
-
-new_local_repository(
-    name = "cuda_win",
-    build_file = "@//third_party/cuda:BUILD",
-    path = "${CUDA_HOME}",
-)
-
-
-http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-#############################################################################################################
-# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
-#############################################################################################################
-
-http_archive(
-    name = "libtorch",
-    build_file = "@//third_party/libtorch:BUILD",
-    strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
-)
-
-# Download these tarballs manually from the NVIDIA website
-# Either place them in the distdir directory in third_party and use the --distdir flag
-# or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz
-
-http_archive(
-    name = "tensorrt",
-    build_file = "@//third_party/tensorrt/archive:BUILD",
-    strip_prefix = "${TENSORRT_STRIP_PREFIX}",
-    urls = [
-        "${TENSORRT_URLS}",
-    ],
-)
-
-http_archive(
-    name = "tensorrt_win",
-    build_file = "@//third_party/tensorrt/archive:BUILD",
-    strip_prefix = "${TENSORRT_STRIP_PREFIX}",
-    urls = [
-        "${TENSORRT_URLS}",
-    ],
-)
-
-
-####################################################################################
-# Locally installed dependencies (use in cases of custom dependencies or aarch64)
-####################################################################################
-
-# NOTE: If you are using a local build of torch, just point the Libtorch dep to that path.
-
-new_local_repository(
-    name = "libtorch_win",
-    path = "${TORCH_INSTALL_PATH}",
-    build_file = "third_party/libtorch/BUILD"
-)
-
-#new_local_repository(
-#   name = "tensorrt",
-#   path = "/usr/",
-#   build_file = "@//third_party/tensorrt/local:BUILD"
-#)