diff --git a/.github/workflows/regression_test.yml b/.github/workflows/regression_test.yml
index 97946adcc2..c39aa4750f 100644
--- a/.github/workflows/regression_test.yml
+++ b/.github/workflows/regression_test.yml
@@ -59,35 +59,35 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - name: CUDA 2.3
+          - name: CUDA 2.5.1
             runs-on: linux.g5.12xlarge.nvidia.gpu
-            torch-spec: 'torch==2.3.0'
+            torch-spec: 'torch==2.5.1 --index-url https://download.pytorch.org/whl/cu121'
             gpu-arch-type: "cuda"
-            gpu-arch-version: "12.1"
-          - name: CUDA 2.4
+            gpu-arch-version: "12.6"
+          - name: CUDA 2.6
             runs-on: linux.g5.12xlarge.nvidia.gpu
-            torch-spec: 'torch==2.4.0'
+            torch-spec: 'torch==2.6.0'
             gpu-arch-type: "cuda"
-            gpu-arch-version: "12.1"
-          - name: CUDA 2.5.1
+            gpu-arch-version: "12.6"
+          - name: CUDA 2.7
             runs-on: linux.g5.12xlarge.nvidia.gpu
-            torch-spec: 'torch==2.5.1 --index-url https://download.pytorch.org/whl/cu121'
+            torch-spec: 'torch==2.7.0'
             gpu-arch-type: "cuda"
-            gpu-arch-version: "12.1"
+            gpu-arch-version: "12.6"
 
-          - name: CPU 2.3
+          - name: CPU 2.5.1
             runs-on: linux.4xlarge
-            torch-spec: 'torch==2.3.0 --index-url https://download.pytorch.org/whl/cpu'
+            torch-spec: 'torch==2.5.1 --index-url https://download.pytorch.org/whl/cpu'
             gpu-arch-type: "cpu"
             gpu-arch-version: ""
-          - name: CPU 2.4
+          - name: CPU 2.6
             runs-on: linux.4xlarge
-            torch-spec: 'torch==2.4.0 --index-url https://download.pytorch.org/whl/cpu'
+            torch-spec: 'torch==2.6.0 --index-url https://download.pytorch.org/whl/cpu'
             gpu-arch-type: "cpu"
             gpu-arch-version: ""
-          - name: CPU 2.5.1
+          - name: CPU 2.7
             runs-on: linux.4xlarge
-            torch-spec: 'torch==2.5.1 --index-url https://download.pytorch.org/whl/cpu'
+            torch-spec: 'torch==2.7.0 --index-url https://download.pytorch.org/whl/cpu'
             gpu-arch-type: "cpu"
             gpu-arch-version: ""
diff --git a/test/dtypes/test_nf4.py b/test/dtypes/test_nf4.py
index 1d63eb33e2..f52644cdf3 100644
--- a/test/dtypes/test_nf4.py
+++ b/test/dtypes/test_nf4.py
@@ -39,7 +39,7 @@
     to_nf4,
 )
 from torchao.testing.utils import skip_if_rocm
-from torchao.utils import TORCH_VERSION_AT_LEAST_2_8
+from torchao.utils import TORCH_VERSION_AT_LEAST_2_7
 
 bnb_available = False
 
@@ -119,7 +119,7 @@ def test_backward_dtype_match(self, dtype: torch.dtype):
     @unittest.skipIf(not bnb_available, "Need bnb availble")
     @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
     @unittest.skipIf(
-        TORCH_VERSION_AT_LEAST_2_8, reason="Failing in CI"
+        TORCH_VERSION_AT_LEAST_2_7, reason="Failing in CI"
     )  # TODO: fix this
     @skip_if_rocm("ROCm enablement in progress")
     @parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32])
@@ -146,7 +146,7 @@ def test_reconstruction_qlora_vs_bnb(self, dtype: torch.dtype):
     @unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
     @skip_if_rocm("ROCm enablement in progress")
     @unittest.skipIf(
-        TORCH_VERSION_AT_LEAST_2_8, reason="Failing in CI"
+        TORCH_VERSION_AT_LEAST_2_7, reason="Failing in CI"
     )  # TODO: fix this
     @parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32])
     def test_nf4_bnb_linear(self, dtype: torch.dtype):
diff --git a/test/quantization/test_galore_quant.py b/test/quantization/test_galore_quant.py
index d32250cdb9..9930ae3e02 100644
--- a/test/quantization/test_galore_quant.py
+++ b/test/quantization/test_galore_quant.py
@@ -7,7 +7,7 @@
 
 import pytest
 
-from torchao.utils import TORCH_VERSION_AT_LEAST_2_8
+from torchao.utils import TORCH_VERSION_AT_LEAST_2_7
 
 # Skip entire test if triton is not available, otherwise CI failure
 try:  # noqa: F401
@@ -94,7 +94,7 @@ def test_galore_quantize_blockwise(dim1, dim2, dtype, signed, blocksize):
 @skip_if_rocm("ROCm enablement in progress")
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="Need CUDA available")
 @pytest.mark.skipif(
-    TORCH_VERSION_AT_LEAST_2_8, reason="Failing in CI"
+    TORCH_VERSION_AT_LEAST_2_7, reason="Failing in CI"
 )  # TODO: fix this
 def test_galore_dequant_blockwise(dim1, dim2, dtype, signed, blocksize):
     g = torch.randn(dim1, dim2, device="cuda", dtype=dtype) * 0.01
diff --git a/test/test_low_bit_optim.py b/test/test_low_bit_optim.py
index c6890b05c0..43941329e1 100644
--- a/test/test_low_bit_optim.py
+++ b/test/test_low_bit_optim.py
@@ -35,7 +35,7 @@
 from torchao.utils import (
     TORCH_VERSION_AT_LEAST_2_4,
     TORCH_VERSION_AT_LEAST_2_5,
-    TORCH_VERSION_AT_LEAST_2_8,
+    TORCH_VERSION_AT_LEAST_2_7,
     get_available_devices,
 )
 
@@ -197,7 +197,7 @@ def test_subclass_slice(self, subclass, shape, device):
     )
     @skip_if_rocm("ROCm enablement in progress")
     @pytest.mark.skipif(
-        TORCH_VERSION_AT_LEAST_2_8, reason="Failing in CI"
+        TORCH_VERSION_AT_LEAST_2_7, reason="Failing in CI"
     )  # TODO: fix this
     @parametrize("optim_name", ["Adam8bit", "AdamW8bit"])
     def test_optim_8bit_correctness(self, optim_name):