add 3.12 inductor CI tests
ghstack-source-id: 54279665250323134ec3ba7f724f89a34833cce6
Pull Request resolved: #126218
williamwen42 committed May 15, 2024
1 parent 1966612 commit e055788
Showing 5 changed files with 48 additions and 0 deletions.
15 changes: 15 additions & 0 deletions .ci/docker/build.sh
@@ -149,6 +149,21 @@ case "$image" in
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks)
CUDA_VERSION=12.1.1
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.12
GCC_VERSION=9
PROTOBUF=yes
DB=yes
VISION=yes
KATEX=yes
UCX_COMMIT=${_UCX_COMMIT}
UCC_COMMIT=${_UCC_COMMIT}
CONDA_CMAKE=yes
TRITON=yes
INDUCTOR_BENCHMARKS=yes
;;
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9)
CUDA_VERSION=11.8.0
CUDNN_VERSION=8
1 change: 1 addition & 0 deletions .github/workflows/docker-builds.yml
@@ -42,6 +42,7 @@ jobs:
pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9,
pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks,
pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9,
pytorch-linux-focal-py3.8-clang10,
pytorch-linux-focal-py3.11-clang10,
21 changes: 21 additions & 0 deletions .github/workflows/inductor.yml
@@ -107,6 +107,27 @@ jobs:
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

linux-focal-cuda12_1-py3_12-gcc9-inductor-build:
name: cuda12.1-py3.12-gcc9-sm86
uses: ./.github/workflows/_linux-build.yml
with:
build-environment: linux-focal-cuda12.1-py3.12-gcc9-sm86
docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3.12-gcc9-inductor-benchmarks
cuda-arch-list: '8.6'
test-matrix: |
{ include: [
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
]}

linux-focal-cuda12_1-py3_12-gcc9-inductor-test:
name: cuda12.1-py3.12-gcc9-sm86
uses: ./.github/workflows/_linux-test.yml
needs: linux-focal-cuda12_1-py3_12-gcc9-inductor-build
with:
build-environment: linux-focal-cuda12.1-py3.12-gcc9-sm86
docker-image: ${{ needs.linux-focal-cuda12_1-py3_12-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_12-gcc9-inductor-build.outputs.test-matrix }}

linux-jammy-cpu-py3_8-gcc11-inductor-build:
name: linux-jammy-cpu-py3.8-gcc11-inductor
uses: ./.github/workflows/_linux-build.yml
5 changes: 5 additions & 0 deletions test/inductor/test_torchinductor.py
@@ -36,6 +36,8 @@
expectedFailureCodegenDynamic,
rand_strided,
same,
skipIfPy312,
xfailIfPy312,
)
from torch._inductor.codegen.common import DataTypePropagation, OptimizationContext
from torch._inductor.fx_passes import pad_mm
@@ -2743,6 +2745,7 @@ def fn(a, b):
check_lowp=False,
)

@skipIfPy312 # segfaults
@config.patch(force_mixed_mm=True)
def test_mixed_mm(self):
def fn(a, b):
@@ -2757,6 +2760,7 @@ def fn(a, b):
check_lowp=True,
)

@skipIfPy312 # segfaults
@config.patch(force_mixed_mm=True)
def test_mixed_mm2(self):
def fn(a, b, scale, bias):
@@ -9448,6 +9452,7 @@ def fn(inp, offsets):

self.common(fn, (inp, offsets), check_lowp=False)

@xfailIfPy312
@requires_gpu()
@config.patch(assume_aligned_inputs=False)
def test_config_option_dont_assume_alignment(self):
6 changes: 6 additions & 0 deletions torch/_dynamo/testing.py
@@ -349,6 +349,12 @@ def xfailIfPy312(fn):
return fn


def skipIfPy312(fn):
if sys.version_info >= (3, 12):
return unittest.skip(fn)
return fn


# Controls tests generated in test/inductor/test_torchinductor_dynamic_shapes.py
# and test/dynamo/test_dynamic_shapes.py
def expectedFailureDynamic(fn):
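An aside on the new testing.py helper: skipIfPy312 hands the test function straight to unittest.skip, which newer unittest releases accept as a bare decorator target (presumably what the helper relies on, since that branch only executes on 3.12+). Below is a minimal standalone sketch of the same version-gating idiom, not the PyTorch implementation: the helper name skip_if_py312, the reason string, and the ExampleTests case are illustrative assumptions, and the sketch spells out the more conventional explicit-reason form of unittest.skip.

import sys
import unittest


def skip_if_py312(fn):
    """Hypothetical standalone analogue of skipIfPy312: skip the decorated
    test entirely when the interpreter is Python 3.12 or newer."""
    if sys.version_info >= (3, 12):
        # unittest.skip(reason) returns a decorator; applying it marks the
        # test as skipped and records the reason in the test report.
        return unittest.skip("known to segfault under Python 3.12")(fn)
    return fn


class ExampleTests(unittest.TestCase):
    @skip_if_py312
    def test_runs_only_below_py312(self):
        self.assertEqual(2 + 2, 4)


if __name__ == "__main__":
    unittest.main(verbosity=2)

Run under Python 3.12 this reports the test as skipped with the given reason; on older interpreters it runs normally. The xfailIfPy312 variant used in test_torchinductor.py presumably wraps the test with unittest.expectedFailure instead, so the test still executes but a failure is reported as expected rather than failing the job.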
