diff --git a/.github/workflows/regression_test.yml b/.github/workflows/regression_test.yml
index 076041d193..2726ced40a 100644
--- a/.github/workflows/regression_test.yml
+++ b/.github/workflows/regression_test.yml
@@ -23,11 +23,6 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - name: CUDA 2.2.2
-            runs-on: linux.g5.12xlarge.nvidia.gpu
-            torch-spec: 'torch==2.2.2 "numpy<2" '
-            gpu-arch-type: "cuda"
-            gpu-arch-version: "12.1"
           - name: CUDA 2.3
             runs-on: linux.g5.12xlarge.nvidia.gpu
             torch-spec: 'torch==2.3.0'
@@ -38,17 +33,17 @@ jobs:
             torch-spec: 'torch==2.4.0'
             gpu-arch-type: "cuda"
             gpu-arch-version: "12.1"
+          - name: CUDA 2.5
+            runs-on: linux.g5.12xlarge.nvidia.gpu
+            torch-spec: 'torch==2.5.0 --index-url https://download.pytorch.org/whl/cu121'
+            gpu-arch-type: "cuda"
+            gpu-arch-version: "12.1"
           - name: CUDA Nightly
             runs-on: linux.g5.12xlarge.nvidia.gpu
             torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cu121'
             gpu-arch-type: "cuda"
             gpu-arch-version: "12.1"
-          - name: CPU 2.2.2
-            runs-on: linux.4xlarge
-            torch-spec: 'torch==2.2.2 --index-url https://download.pytorch.org/whl/cpu "numpy<2" '
-            gpu-arch-type: "cpu"
-            gpu-arch-version: ""
           - name: CPU 2.3
             runs-on: linux.4xlarge
             torch-spec: 'torch==2.3.0 --index-url https://download.pytorch.org/whl/cpu'
@@ -59,6 +54,11 @@ jobs:
             torch-spec: 'torch==2.4.0 --index-url https://download.pytorch.org/whl/cpu'
             gpu-arch-type: "cpu"
             gpu-arch-version: ""
+          - name: CPU 2.5
+            runs-on: linux.4xlarge
+            torch-spec: 'torch==2.5.0 --index-url https://download.pytorch.org/whl/cpu'
+            gpu-arch-type: "cpu"
+            gpu-arch-version: ""
           - name: CPU Nightly
             runs-on: linux.4xlarge
             torch-spec: '--pre torch --index-url https://download.pytorch.org/whl/nightly/cpu'
diff --git a/test/dtypes/test_affine_quantized_tensor_parallel.py b/test/dtypes/test_affine_quantized_tensor_parallel.py
index 42511e7db8..af07328407 100644
--- a/test/dtypes/test_affine_quantized_tensor_parallel.py
+++ b/test/dtypes/test_affine_quantized_tensor_parallel.py
@@ -18,7 +18,7 @@
 )
 from torchao.quantization.quant_api import quantize_
 from torchao.dtypes import AffineQuantizedTensor
-from torchao.utils import TORCH_VERSION_AT_LEAST_2_5
+from torchao.utils import TORCH_VERSION_AT_LEAST_2_6
 
 class TestAffineQuantizedTensorParallel(DTensorTestBase):
     """Basic test case for tensor subclasses
@@ -111,8 +111,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 
         y_d = dn_dist(up_dist(input_dtensor))
 
-        if not TORCH_VERSION_AT_LEAST_2_5:
-            # Need torch 2.5 to support compiled tensor parallelism
+        if not TORCH_VERSION_AT_LEAST_2_6:
+            # Need torch 2.6 to support compiled tensor parallelism
             return
 
         up_compiled = torch.compile(up_dist)
diff --git a/torchao/testing/utils.py b/torchao/testing/utils.py
index 5211065e14..39edc50085 100644
--- a/torchao/testing/utils.py
+++ b/torchao/testing/utils.py
@@ -10,7 +10,7 @@
 from torchao.dtypes import to_affine_quantized_intx
 from torchao.quantization.quant_primitives import MappingType
 from torchao.quantization import quantize_, int8_weight_only
-from torchao.utils import TORCH_VERSION_AT_LEAST_2_5
+from torchao.utils import TORCH_VERSION_AT_LEAST_2_6
 
 """
 How to use:
@@ -322,8 +322,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 
         y_d = dn_dist(up_dist(input_dtensor))
 
-        if not TORCH_VERSION_AT_LEAST_2_5:
-            # Need torch 2.5 to support compiled tensor parallelism
+        if not TORCH_VERSION_AT_LEAST_2_6:
+            # Need torch 2.6 to support compiled tensor parallelism
             return
 
         up_compiled = torch.compile(up_dist)
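
For context on the version-gate bump in the two test files above: `TORCH_VERSION_AT_LEAST_2_6` is a boolean flag exported from `torchao.utils`, alongside the existing `TORCH_VERSION_AT_LEAST_2_5`. The sketch below shows one way such a flag can be derived from `torch.__version__`; the helper name `torch_version_at_least` and the use of `packaging` are illustrative assumptions, and the actual `torchao.utils` implementation may differ (for example in how nightly or internal builds are treated).

```python
# Hypothetical sketch of a version gate like TORCH_VERSION_AT_LEAST_2_6.
# The helper name and the use of `packaging` are assumptions for illustration;
# torchao.utils may implement this differently.
import torch
from packaging.version import Version


def torch_version_at_least(min_version: str) -> bool:
    # Compare on the base release so dev/nightly builds such as
    # "2.6.0.dev20240917+cu121" still count as meeting the 2.6.0 threshold.
    installed = Version(Version(torch.__version__).base_version)
    return installed >= Version(min_version)


TORCH_VERSION_AT_LEAST_2_5 = torch_version_at_least("2.5.0")
TORCH_VERSION_AT_LEAST_2_6 = torch_version_at_least("2.6.0")

# Usage mirroring the updated tests: bail out of the compiled
# tensor-parallel path on older torch builds.
if not TORCH_VERSION_AT_LEAST_2_6:
    print("Skipping compiled tensor parallelism check (needs torch >= 2.6)")
```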