diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml
deleted file mode 100644
index 569facc32cdf5..0000000000000
--- a/.github/actionlint.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-self-hosted-runner:
- labels:
- - linux.20_04.4x
- - linux.20_04.16x
- - linux.large
- - linux.large.arc
- - linux.2xlarge
- - linux.4xlarge
- - linux.12xlarge
- - linux.24xlarge
- - linux.arm64.2xlarge
- - linux.4xlarge.nvidia.gpu
- - linux.8xlarge.nvidia.gpu
- - linux.16xlarge.nvidia.gpu
- - linux.g5.4xlarge.nvidia.gpu
- - linux.s390x
- - windows.4xlarge.nonephemeral
- - windows.8xlarge.nvidia.gpu
- - windows.8xlarge.nvidia.gpu.nonephemeral
- - windows.g5.4xlarge.nvidia.gpu
- - bm-runner
- - linux.rocm.gpu
- - macos-m1-stable
- - macos-m1-13
- - macos-m1-14
- - macos-12-xl
- - macos-12
- - macos12.3-m1
- - macos-latest-xlarge
- - macos-13-xlarge
diff --git a/.github/auto_request_review.yml b/.github/auto_request_review.yml
deleted file mode 100644
index 3ec436d107622..0000000000000
--- a/.github/auto_request_review.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Documented at https://github.com/necojackarc/auto-request-review
-reviewers:
- groups:
- symbolic-shapes:
- - ezyang
- - albanD
- - miladm
- - bdhirsh
-
- per_author:
- symbolic-shapes:
- - symbolic-shapes
- - antoniojkim
- - SherlockNoMad
- Chillee:
- - ezyang
-
-files:
- # none yet, TODO: migrate CODEOWNERS here
-
-options:
- ignore_draft: true
- ignored_keywords:
- - DO NOT REVIEW
-  # Just manually set up a self-referential per_author rule if you
- # want group assignment
- enable_group_assignment: false
diff --git a/.github/label_to_label.yml b/.github/label_to_label.yml
deleted file mode 100644
index e6c66a5e56cf6..0000000000000
--- a/.github/label_to_label.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Use this to auto apply labels based on other labels. Applies to both PRs and
-# issues. Currently only the "any" and "all" conditions are supported.
-- any:
- - "module: custom operators"
- - "module: aotdispatch"
- then:
- - "module: pt2-dispatcher"
-- any:
- - "module: dynamo"
- - "module: pt2-dispatcher"
- - "module: inductor"
- then:
- - "oncall: pt2"
diff --git a/.github/labeler.yml b/.github/labeler.yml
deleted file mode 100644
index f436ec684ffb9..0000000000000
--- a/.github/labeler.yml
+++ /dev/null
@@ -1,101 +0,0 @@
-"module: dynamo":
-- torch/_dynamo/**
-- torch/csrc/dynamo/**
-- benchmarks/dynamo/**
-- test/dynamo/**
-
-"module: inductor":
-- torch/_inductor/**
-- test/inductor/**
-
-"ciflow/inductor":
-- torch/_decomp/**
-- torch/_dynamo/**
-- torch/_export/**
-- torch/_inductor/**
-- benchmarks/dynamo/**
-- torch/_subclasses/fake_tensor.py
-- torch/_subclasses/fake_utils.py
-- torch/_subclasses/meta_utils.py
-- test/distributed/test_dynamo_distributed.py
-- test/distributed/test_inductor_collectives.py
-- torch/_functorch/_aot_autograd/**
-- torch/_functorch/aot_autograd.py
-- torch/_functorch/partitioners.py
-- .ci/docker/ci_commit_pins/**
-- .github/ci_commit_pins/**
-- c10/core/Sym*
-- torch/fx/experimental/symbolic_shapes.py
-- torch/fx/experimental/recording.py
-- torch/fx/experimental/sym_node.py
-- torch/fx/experimental/validator.py
-- torch/fx/experimental/_sym_dispatch_mode.py
-- torch/fx/experimental/proxy_tensor.py
-- test/distributed/_tensor/test_dtensor_compile.py
-- test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
-- torch/distributed/_tensor/**
-- torch/distributed/fsdp/**
-- torch/csrc/inductor/**
-- test/cpp/aoti_abi_check/**
-- test/cpp/aoti_inference/**
-
-"module: cpu":
-- aten/src/ATen/cpu/**
-- aten/src/ATen/native/cpu/**
-- aten/src/ATen/native/quantized/cpu/**
-- aten/src/ATen/native/Convolution*.cpp
-- aten/src/ATen/native/mkldnn/**
-- torch/cpu/**
-- torch/utils/mkldnn.py
-- torch/utils/_sympy/**
-- test/test_mkldnn.py
-
-"module: mkldnn":
-- third_party/ideep
-- caffe2/ideep/**
-- caffe2/python/ideep/**
-- cmake/Modules/FindMKLDNN.cmake
-- third_party/mkl-dnn.BUILD
-- torch/csrc/jit/codegen/onednn/**
-- test/test_jit_llga_fuser.py
-- test/test_mkldnn.py
-
-"ciflow/linux-aarch64":
-- third_party/ideep
-- caffe2/ideep/**
-- caffe2/python/ideep/**
-- cmake/Modules/FindMKLDNN.cmake
-- third_party/mkl-dnn.BUILD
-- torch/csrc/jit/codegen/onednn/**
-- test/test_jit_llga_fuser.py
-- test/test_mkldnn.py
-
-"module: amp (automated mixed precision)":
-- torch/amp/**
-- aten/src/ATen/autocast_mode.*
-- torch/csrc/jit/passes/autocast.cpp
-- test/test_autocast.py
-
-"NNC":
-- torch/csrc/jit/tensorexpr/**
-
-"release notes: quantization":
-- torch/ao/quantization/**
-- torch/quantization/**
-- aten/src/ATen/quantized/**
-- aten/src/ATen/native/quantized/cpu/**
-- test/quantization/**
-
-"ciflow/trunk":
-- .ci/docker/ci_commit_pins/triton.txt
-
-"oncall: distributed":
-- torch/csrc/distributed/**
-- torch/distributed/**
-- torch/nn/parallel/**
-- test/distributed/**
-- torch/testing/_internal/distributed/**
-
-"module: distributed_checkpoint":
-- torch/distributed/checkpoint/**
-- test/distributed/checkpoint/**
diff --git a/.github/merge_rules.yaml b/.github/merge_rules.yaml
deleted file mode 100644
index db0ec3c51aa79..0000000000000
--- a/.github/merge_rules.yaml
+++ /dev/null
@@ -1,515 +0,0 @@
-- name: ONNX exporter
- patterns:
- - .ci/caffe2/*
- - .ci/onnx/*
- - .ci/docker/common/install_onnx.sh
- - aten/src/ATen/core/interned_strings.h
- - benchmarks/dynamo/**
- - docs/source/onnx.rst
- - docs/source/onnx*
- - docs/source/scripts/onnx/**
- - docs/source/_static/img/onnx/**
- - scripts/onnx/**
- - test/onnx/**
- - test/onnx_caffe2/**
- - tools/onnx/**
- - torch/_dynamo/backends/onnxrt.py
- - torch/_C/__init__.pyi.in
- - torch/_C/_onnx.pyi
- - torch/_logging/**
- - torch/csrc/jit/passes/onnx.*
- - torch/csrc/jit/passes/onnx/**
- - torch/csrc/jit/serialization/export.*
- - torch/csrc/jit/serialization/onnx.*
- - torch/csrc/onnx/**
- - torch/onnx/**
- - torch/testing/_internal/common_methods_invocations.py
- - third_party/onnx
- - caffe2/python/onnx/**
- approved_by:
- - BowenBao
- - justinchuby
- - liqunfu
- - shubhambhokare1
- - thiagocrepaldi
- - titaiwangms
- - wschin
- - xadupre
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: NVFuser
- patterns:
- - test/test_jit_cuda_fuser.py
- - torch/csrc/jit/codegen/fuser/cuda/**
- - torch/csrc/jit/codegen/cuda/**
- - benchmarks/cpp/nvfuser/**
- approved_by:
- - csarofeen
- - ngimel
- - jjsjann123
- - kevinstephano
- - ptrblck
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: OSS CI
- patterns:
- - .github/**
- - .circleci/**
- - .ci/**
- - scripts/**
- - tools/**
- approved_by:
- - alband
- - dagitses
- - pytorch/pytorch-dev-infra
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: OSS CI / pytorchbot
- patterns:
- - .github/ci_commit_pins/audio.txt
- - .github/ci_commit_pins/vision.txt
- - .github/ci_commit_pins/torchdynamo.txt
- - .ci/docker/ci_commit_pins/triton.txt
- approved_by:
- - pytorchbot
- ignore_flaky_failures: false
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
- - inductor
-
-- name: OSS CI / pytorchbot / Executorch
- patterns:
- - .ci/docker/ci_commit_pins/executorch.txt
- approved_by:
- - pytorchbot
- ignore_flaky_failures: false
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull / linux-jammy-py3-clang12-executorch / build
- - pull / linux-jammy-py3-clang12-executorch / test (executorch, 1, 1, linux.2xlarge)
-
-- name: OSS CI / pytorchbot / XLA
- patterns:
- - .github/ci_commit_pins/xla.txt
- approved_by:
- - pytorchbot
- ignore_flaky_failures: false
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull / linux-focal-py3_8-clang9-xla / build
- - pull / linux-focal-py3_8-clang9-xla / test (xla, 1, 1, linux.12xlarge)
-
-- name: Documentation
- patterns:
- - docs/**
- - torch/*docs.py
- approved_by:
- - svekars
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Mobile
- patterns:
- - ios/**
- - android/**
- - test/mobile/**
- approved_by:
- - linbinyu
- - IvanKobzarev
- - dreiss
- - raziel
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: PrimTorch
- patterns:
- - torch/_meta_registrations.py
- - torch/_decomp/**
- - torch/_refs/**
- - torch/_prims/**
- - torch/_prims_common/**
- approved_by:
- - nkaretnikov
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Linear Algebra
- patterns:
- - aten/src/ATen/native/cuda/linalg/**
- - aten/src/ATen/LinalgBackend.h
- - aten/src/ATen/native/**LinearAlgebra*
- - docs/source/linalg.rst
- - torch/linalg/**
- - torch/_linalg_utils.py
- - torch/**python_linalg_functions.*
- - torch/**linalg.h
- - tools/autograd/templates/python_linalg_functions.cpp
- - test/test_linalg.py
- approved_by:
- - mruberry
- - lezcano
- - IvanYashchuk
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: FFT
- patterns:
- - aten/src/ATen/native/cuda/*FFT*.h
- - aten/src/ATen/native/SpectralOps.cpp
- - aten/src/ATen/native/mkl/SpectralOps.cpp
- - aten/src/ATen/native/cuda/SpectralOps.*
- - docs/source/fft.rst
- - torch/fft/**
- - torch/csrc/api/include/torch/fft.h
- - torch/**python_fft_functions.*
- - tools/autograd/templates/python_fft_functions.cpp
- - test/cpp/api/fft.cpp
- approved_by:
- - mruberry
- - peterbell10
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Sparse
- patterns:
- - benchmarks/sparse
- - c10/util/sparse_bitset.h
- - docs/source/sparse.rst
- - torch/**sparse/**
- - torch/**sparse*
- - torch/optim/sparse*
- - torch/ao/nn/sparse/**
- - torch/utils/benchmark/**sparse*
- - aten/src/ATen/native/ao_sparse/**
- - aten/src/ATen/native/sparse/**
- - aten/src/ATen/**Sparse*
- - aten/src/ATen/*Sparse*
- - torch/_masked/**
- - test/*_masked*
- - test/**sparse*
- approved_by:
- - nikitaved
- - cpuhrsch
- - pearu
- - IvanYashchuk
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: MPS
- patterns:
- - test/test_mps.py
- - aten/src/ATen/native/native_functions.yaml
- - aten/src/ATen/mps/**
- - aten/src/ATen/native/mps/**
- approved_by:
- - kulinseth
- - alband
- - malfet
- - razarmehr
- - DenisVieriu97
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: XPU ATen
- patterns:
- - aten/src/ATen/xpu/**
- - c10/xpu/**
- - torch/csrc/xpu/**
- - torch/xpu/**
- - test/xpu/**
- - third_party/xpu.txt
- approved_by:
- - EikanWang
- - jgong5
- - gujinghui
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Distributions
- patterns:
- - torch/distributions/**
- - test/distributions/**
- approved_by:
- - fritzo
- - neerajprad
- - alicanb
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Distributed
- patterns:
- - docs/source/pipeline.rst
- - docs/source/distributed*
- - docs/source/rpc.rst
- - docs/source/rpc/**
- - docs/source/_static/img/rpc*
- - docs/source/_static/img/*distributed*
- - docs/source/elastic/**
- - benchmarks/distributed/**
- - torch/distributed/**
- - torch/nn/parallel/distributed*
- - torch/_C/_distributed*
- - torch/csrc/distributed/**
- - torch/testing/_internal/distributed/**
- - test/distributed/**
- - test/cpp/dist_autograd/**
- - test/cpp/rpc/**
- approved_by:
- - mrshenli
- - pritamdamania87
- - zhaojuanmao
- - rohan-varma
- - wanchaol
- - fduwjj
- - H-Huang
- - kwen2501
- - XilunWu
- - wz337
- - awgu
- - fegin
- - kurman
- - LucasLLC
- - sanketpurandare
- - shuqiangzhang
- - tianyu-l
- - kiukchung
- - d4l3k
- - shuqiangzhang
- - weifengpy
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: IDEEP
- patterns:
- - third_party/ideep
- - caffe2/ideep/**
- - caffe2/python/ideep/**
- - cmake/Modules/FindMKLDNN.cmake
- - third_party/mkl-dnn.BUILD
- approved_by:
- - XiaobingSuper
- - jgong5
- - mingfeima
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: oneDNN graph
- patterns:
- - torch/csrc/jit/codegen/onednn/**
- - test/test_jit_llga_fuser.py
- approved_by:
- - sanchitintel
- - chunyuan-w
- - jgong5
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: CPU ATen backend
- patterns:
- - aten/src/ATen/cpu/**
- - aten/src/ATen/native/cpu/**
- - aten/src/ATen/native/quantized/cpu/**
- - aten/src/ATen/native/Convolution*.cpp
- - aten/src/ATen/native/mkldnn/**
- - test/test_mkl*.py
- approved_by:
- - mingfeima
- - XiaobingSuper
- - jgong5
- - vfdev-5
- - leslie-fang-intel
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: CPU frontend
- patterns:
- - torch/cpu/**
- - torch/utils/mkldnn.py
- - test/test_mkldnn.py
- approved_by:
- - leslie-fang-intel
- - jgong5
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: CPU inductor
- patterns:
- - torch/_inductor/mkldnn_lowerings.py
- - torch/_inductor/fx_passes/mkldnn_fusion.py
- - torch/_inductor/fx_passes/quantization.py
- - torch/_inductor/codegen/cpp.py
- - test/inductor/test_mkldnn_pattern_matcher.py
- - test/inductor/test_cpu_repo.py
- - test/inductor/test_cpu_cpp_wrapper.py
- - aten/src/ATen/cpu/**
- - aten/src/ATen/native/quantized/cpu/**
- - test/quantization/core/test_quantized_op.py
- - torch/ao/quantization/quantizer/x86_inductor_quantizer.py
- - test/quantization/pt2e/test_x86inductor_quantizer.py
- approved_by:
- - leslie-fang-intel
- - jgong5
- - EikanWang
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Autocast
- patterns:
- - torch/amp/**
- - aten/src/ATen/autocast_mode.*
- - torch/csrc/jit/passes/autocast.cpp
- - test/test_autocast.py
- - torch/testing/_internal/autocast_test_lists.py
- approved_by:
- - leslie-fang-intel
- - jgong5
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: NNC
- patterns:
- - torch/csrc/jit/tensorexpr/**
- approved_by:
- - EikanWang
- - jgong5
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Lazy Tensor
- patterns:
- - torch/csrc/lazy/**
- - test/cpp/lazy/**
- - test/lazy/**
- - torchgen/api/lazy.py
- - torchgen/dest/lazy_ir.py
- - torchgen/dest/lazy_ts_lowering.py
- - torchgen/gen_lazy_tensor.py
- - aten/src/ATen/native/ts_native_functions.yaml
- - .github/ci_commit_pins/xla.txt
- approved_by:
- - alanwaketan
- - JackCaoG
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: functorch
- patterns:
- - functorch/**
- - test/functorch/**
- - torch/_C/__init__.pyi.in
- - torch/__init__.py
- - torch/csrc/functorch/**
- - torch/_functorch/**
- - torch/func/**
- - aten/src/ATen/functorch/**
- - docs/source/func**
- - '**vmap**'
- - '**functorch**'
- - '**pytree**'
- approved_by:
- - kshitij12345
- - srossross
- - chillee
- - zou3519
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: ROCm
- patterns:
- - '**rocm**'
- - '**hip**'
- approved_by:
- - jeffdaily
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: superuser
- patterns:
- - '*'
- approved_by:
- - pytorch/metamates
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Core Reviewers
- patterns:
- - '*'
- approved_by:
- - mruberry
- - lezcano
- - Skylion007
- - ngimel
- - peterbell10
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
-
-- name: Core Maintainers
- patterns:
- - '*'
- approved_by:
- - soumith
- - gchanan
- - ezyang
- - dzhulgakov
- - malfet
- mandatory_checks_name:
- - EasyCLA
- - Lint
- - pull
diff --git a/.github/pytorch-circleci-labels.yml b/.github/pytorch-circleci-labels.yml
deleted file mode 100644
index 6990a3d304b24..0000000000000
--- a/.github/pytorch-circleci-labels.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# For documentation concerning this configuration, please refer to:
-# https://github.com/pytorch/pytorch-probot#trigger-circleci-workflows
-labels_to_circle_params:
- ci/binaries:
- parameter: run_binary_tests
- default_true_on:
- branches:
- - nightly
- - release/.*
- tags:
- - v[0-9]+(\.[0-9]+)*-rc[0-9]+
- set_to_false:
- - run_build
- ci/master:
- parameter: run_master_build
- set_to_false:
- - run_build
- ci/slow-gradcheck:
- parameter: run_slow_gradcheck_build
- set_to_false:
- - run_build
diff --git a/.github/pytorch-probot.yml b/.github/pytorch-probot.yml
deleted file mode 100644
index ade85af096871..0000000000000
--- a/.github/pytorch-probot.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-tracking_issue: 24422
-ciflow_tracking_issue: 64124
-ciflow_push_tags:
-- ciflow/binaries
-- ciflow/binaries_conda
-- ciflow/binaries_libtorch
-- ciflow/binaries_wheel
-- ciflow/inductor
-- ciflow/inductor-perf-compare
-- ciflow/inductor-micro-benchmark
-- ciflow/linux-aarch64
-- ciflow/mps
-- ciflow/nightly
-- ciflow/periodic
-- ciflow/rocm
-- ciflow/slow
-- ciflow/trunk
-- ciflow/unstable
-- ciflow/xpu
-- ciflow/torchbench
-retryable_workflows:
-- lint
-- pull
-- trunk
-- linux-binary
-- windows-binary
-labeler_config: labeler.yml
-label_to_label_config: label_to_label.yml
diff --git a/.github/workflows/_android-build-test.yml b/.github/workflows/_android-build-test.yml
deleted file mode 100644
index d599e769b8b6a..0000000000000
--- a/.github/workflows/_android-build-test.yml
+++ /dev/null
@@ -1,145 +0,0 @@
-name: android-build-test
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- docker-image-name:
- required: true
- type: string
- description: Name of the base docker image to build with.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- test-matrix:
- required: true
- type: string
- description: |
- A JSON description of what configs to run later on.
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- filter:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
- keep-going: ${{ steps.filter.outputs.keep-going }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
-
- - name: Select all requested test configurations
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
-
- build-and-test:
- needs: filter
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch' && needs.filter.outputs.is-test-matrix-empty == 'False'
- strategy:
- matrix: ${{ fromJSON(needs.filter.outputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image-name }}
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Output disk space left
- run: |
- sudo df -H
-
- - name: Preserve github env variables for use in docker
- run: |
- env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
- env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
-
- - name: Build
- env:
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- TORCH_CUDA_ARCH_LIST: 5.2
- SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
- DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- run: |
- set -e
-        # Unlike other gradle jobs, it's not worth building libtorch in a separate CI job and sharing it via docker, because:
-        # 1) Not shareable: it's a custom selective build, which differs from the default libtorch mobile build;
- # 2) Not parallelizable by architecture: it only builds libtorch for one architecture;
-
- export BUILD_LITE_INTERPRETER
- BUILD_LITE_INTERPRETER="1"
- if [[ "${BUILD_ENVIRONMENT}" == *"full-jit" ]]; then
- BUILD_LITE_INTERPRETER="0"
- fi
-
- git submodule sync && git submodule update -q --init --recursive --depth 1
- export id
- id=$(docker run -e BUILD_ENVIRONMENT \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e SCCACHE_BUCKET \
- -e SKIP_SCCACHE_INITIALIZATION=1 \
- -e TORCH_CUDA_ARCH_LIST \
- -e BUILD_LITE_INTERPRETER \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --tty \
- --detach \
- --user jenkins \
- -v "$(pwd):/var/lib/jenkins/workspace" \
- -t -d -w /var/lib/jenkins "${DOCKER_IMAGE}")
-
- export COMMAND
- # shellcheck disable=SC2016
- COMMAND='(echo "sudo chown -R jenkins workspace && cd workspace && ./scripts/build_android_gradle.sh" | docker exec -u jenkins -e BUILD_LITE_INTERPRETER -e GRADLE_OFFLINE=1 -i "$id" bash) 2>&1'
- echo "${COMMAND}" > ./command.sh && bash ./command.sh
-        # Skip docker push as this job is purely for size analysis purposes.
- # Result binaries are already in `/home/circleci/project/` as it's mounted instead of copied.
-
- - name: Chown workspace
- uses: ./.github/actions/chown-workspace
- if: always()
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
diff --git a/.github/workflows/_android-full-build-test.yml b/.github/workflows/_android-full-build-test.yml
deleted file mode 100644
index 7a0c4377eca4e..0000000000000
--- a/.github/workflows/_android-full-build-test.yml
+++ /dev/null
@@ -1,190 +0,0 @@
-name: android-full-build-test
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- docker-image-name:
- required: true
- type: string
- description: Name of the base docker image to build with.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- test-matrix:
- required: true
- type: string
- description: |
- A JSON description of what configs to run later on.
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- filter:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
- keep-going: ${{ steps.filter.outputs.keep-going }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
-
- - name: Select all requested test configurations
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
-
- build:
- needs: filter
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch' && needs.filter.outputs.is-test-matrix-empty == 'False'
- strategy:
- matrix: ${{ fromJSON(needs.filter.outputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image-name }}
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Output disk space left
- shell: bash
- run: |
- sudo df -H
-
- - name: Preserve github env variables for use in docker
- shell: bash
- run: |
- env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
- env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
-
- - name: Parse ref
- id: parse-ref
- run: .github/scripts/parse_ref.py
-
- - name: Build arm-v7a
- uses: ./.github/actions/build-android
- with:
- arch: arm_v7a
- arch-for-build-env: arm-v7a
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- build-environment: ${{ inputs.build-environment }}
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- branch: ${{ steps.parse-ref.outputs.branch }}
-
- - name: Build arm-v8a
- uses: ./.github/actions/build-android
- with:
- arch: arm_v8a
- arch-for-build-env: arm-v8a
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- build-environment: ${{ inputs.build-environment }}
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- branch: ${{ steps.parse-ref.outputs.branch }}
-
- - name: Build x86_32
- id: build-x86_32
- uses: ./.github/actions/build-android
- with:
- arch: x86_32
- arch-for-build-env: x86_32
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- build-environment: ${{ inputs.build-environment }}
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- branch: ${{ steps.parse-ref.outputs.branch }}
-
- - name: Build x86_64
- uses: ./.github/actions/build-android
- with:
- arch: x86_64
- arch-for-build-env: x86_64
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- build-environment: ${{ inputs.build-environment }}
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- branch: ${{ steps.parse-ref.outputs.branch }}
-
- - name: Build final artifact
- env:
- BRANCH: ${{ steps.parse-ref.outputs.branch }}
- DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- AWS_DEFAULT_REGION: us-east-1
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
- ID_X86_32: ${{ steps.build-x86_32.outputs.container_id }}
- run: |
- set -eux
-
- # Putting everything together
-          # The ID_X86_32 container was created during the build-x86_32 step
- docker cp "${GITHUB_WORKSPACE}/build_android_install_arm_v7a" "${ID_X86_32}:/var/lib/jenkins/workspace/build_android_install_arm_v7a"
- docker cp "${GITHUB_WORKSPACE}/build_android_install_x86_64" "${ID_X86_32}:/var/lib/jenkins/workspace/build_android_install_x86_64"
- docker cp "${GITHUB_WORKSPACE}/build_android_install_arm_v8a" "${ID_X86_32}:/var/lib/jenkins/workspace/build_android_install_arm_v8a"
- docker cp "${GITHUB_WORKSPACE}/build_android_install_x86_32" "${ID_X86_32}:/var/lib/jenkins/workspace/build_android_install_x86_32"
-
- # run gradle buildRelease
- (echo "./scripts/build_android_gradle.sh" | docker exec \
- -e BUILD_ENVIRONMENT="pytorch-linux-focal-py3-clang9-android-ndk-r21e-gradle-build" \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e AWS_DEFAULT_REGION \
- -e PR_NUMBER \
- -e SHA1 \
- -e BRANCH \
- -e SCCACHE_BUCKET \
- -e SKIP_SCCACHE_INITIALIZATION=1 \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --user jenkins \
- -u jenkins -i "${ID_X86_32}" bash) 2>&1
-
- mkdir -p "${GITHUB_WORKSPACE}/build_android_artifacts"
- docker cp "${ID_X86_32}:/var/lib/jenkins/workspace/android/artifacts.tgz" "${GITHUB_WORKSPACE}/build_android_artifacts/"
-
- - name: Store PyTorch Android Build Artifacts on S3
- uses: seemethere/upload-artifact-s3@v5
- with:
- name: ${{ inputs.build-environment }}
- retention-days: 14
- if-no-files-found: error
- path: build_android_artifacts/artifacts.tgz
-
- - name: Chown workspace
- uses: ./.github/actions/chown-workspace
- if: always()
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
diff --git a/.github/workflows/_bazel-build-test.yml b/.github/workflows/_bazel-build-test.yml
deleted file mode 100644
index ca65ce64bc657..0000000000000
--- a/.github/workflows/_bazel-build-test.yml
+++ /dev/null
@@ -1,205 +0,0 @@
-name: bazel
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- docker-image-name:
- required: true
- type: string
- description: Name of the base docker image to build with.
- cuda-version:
- required: true
- type: string
-      description: What CUDA version to build with (e.g. "11.7"), or "cpu" for none.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- test-matrix:
- required: true
- type: string
- description: |
- A JSON description of what configs to run later on.
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- filter:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
- keep-going: ${{ steps.filter.outputs.keep-going }}
- reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
-
- - name: Select all requested test configurations
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
-
- build-and-test:
- needs: filter
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch' && needs.filter.outputs.is-test-matrix-empty == 'False'
- strategy:
- matrix: ${{ fromJSON(needs.filter.outputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image-name }}
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
-      - name: Check if in an ARC runner
- shell: bash
- id: check_arc_runner
- run: echo "IN_ARC_RUNNER=$([ -f /.inarc ] && echo true || echo false)" >> "$GITHUB_OUTPUT"
-
- - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
- uses: pytorch/test-infra/.github/actions/setup-nvidia@main
- if: ${{ inputs.cuda-version != 'cpu' && steps.check_arc_runner.outputs.IN_ARC_RUNNER == 'false' }}
-
- - name: Output disk space left
- run: |
- sudo df -H
-
- - name: Preserve github env variables for use in docker
- run: |
- env | grep '^GITHUB' >> "/tmp/github_env_${GITHUB_RUN_ID}"
- env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
-
- - name: Parse ref
- id: parse-ref
- run: .github/scripts/parse_ref.py
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Build
- env:
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- BRANCH: ${{ steps.parse-ref.outputs.branch }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_WORKFLOW: ${{ github.workflow }}
- GITHUB_JOB: ${{ github.job }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- GITHUB_RUN_NUMBER: ${{ github.run_number }}
- GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
- JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- REENABLED_ISSUES: ${{ needs.filter.outputs.reenabled-issues }}
- # TODO duplicated
- AWS_DEFAULT_REGION: us-east-1
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
- TORCH_CUDA_ARCH_LIST: 5.2
- DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- CUDA_VERSION: ${{ inputs.cuda-version }}
- run: |
- export SHARD_NUMBER=0
- # detached container should get cleaned up by teardown_ec2_linux
- # TODO: Stop building test binaries as part of the build phase
- # Make sure we copy test results from bazel-testlogs symlink to
- # a regular directory ./test/test-reports
- # shellcheck disable=SC2086
- container_name=$(docker run \
- ${GPU_FLAG:-} \
- -e BUILD_ENVIRONMENT \
- -e GITHUB_ACTIONS \
- -e GITHUB_REPOSITORY \
- -e GITHUB_WORKFLOW \
- -e GITHUB_JOB \
- -e GITHUB_RUN_NUMBER \
- -e GITHUB_RUN_ATTEMPT \
- -e JOB_ID \
- -e GIT_DEFAULT_BRANCH="$GIT_DEFAULT_BRANCH" \
- -e SHARD_NUMBER \
- -e NUM_TEST_SHARDS \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e SCCACHE_BUCKET \
- -e SKIP_SCCACHE_INITIALIZATION=1 \
- -e REENABLED_ISSUES \
- -e TORCH_CUDA_ARCH_LIST \
- -e OUR_GITHUB_JOB_ID \
- -e CUDA_VERSION \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --shm-size="1g" \
- --tty \
- --detach \
- --user jenkins \
- -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
- -w /var/lib/jenkins/workspace \
- "${DOCKER_IMAGE}"
- )
- docker exec -t "${container_name}" sh -c '.ci/pytorch/build.sh'
- echo "container_id=${container_name}" >> "${GITHUB_ENV}"
-
- - name: Test
- id: test
-      # Time out the test phase after 2 hours
- timeout-minutes: 120
- run: |
- docker exec -t "${container_id}" sh -c '.ci/pytorch/test.sh && cp -Lr ./bazel-testlogs ./test/test-reports'
-
- - name: Print remaining test logs
- shell: bash
- if: always() && steps.test.conclusion
- run: |
- cat test/**/*_toprint.log || true
-
- - name: Chown workspace
- uses: ./.github/actions/chown-workspace
- if: always()
-
- - name: Upload test artifacts
- uses: ./.github/actions/upload-test-artifacts
- if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
- with:
- file-suffix: bazel-${{ github.job }}_${{ steps.get-job-id.outputs.job-id }}
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
diff --git a/.github/workflows/_binary-build-linux.yml b/.github/workflows/_binary-build-linux.yml
deleted file mode 100644
index 6f92af0ba380b..0000000000000
--- a/.github/workflows/_binary-build-linux.yml
+++ /dev/null
@@ -1,291 +0,0 @@
-name: linux-binary-build
-
-on:
- workflow_call:
- inputs:
- build_name:
- required: true
- type: string
- description: The build's name
- build_environment:
- required: true
- type: string
- description: The build environment
- runs_on:
- required: false
- default: linux.12xlarge
- type: string
-      description: Hardware to run this "build" job on, linux.12xlarge or linux.arm64.2xlarge.
- ALPINE_IMAGE:
- required: false
- type: string
- default: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- description: Alpine image to use
- PYTORCH_ROOT:
- required: true
- type: string
- description: Root directory for the pytorch/pytorch repository
- BUILDER_ROOT:
- required: true
- type: string
- description: Root directory for the pytorch/builder repository
- PACKAGE_TYPE:
- required: true
- type: string
- description: Package type
- DESIRED_CUDA:
- required: true
- type: string
-      description: Desired CUDA version
- GPU_ARCH_VERSION:
- required: false
- type: string
- description: GPU Arch version
- GPU_ARCH_TYPE:
- required: true
- type: string
- description: GPU Arch type
- DOCKER_IMAGE:
- required: true
- type: string
- description: Docker image to use
- LIBTORCH_CONFIG:
- required: false
- type: string
- description: Desired libtorch config (for libtorch builds only)
- LIBTORCH_VARIANT:
- required: false
- type: string
- description: Desired libtorch variant (for libtorch builds only)
- DESIRED_DEVTOOLSET:
- required: false
- type: string
- description: Desired dev toolset
- DESIRED_PYTHON:
- required: false
- type: string
- description: Desired python version
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS:
- required: false
- type: string
- description: Extra install requirements
- default: ""
- secrets:
- github-token:
- required: true
-      description: GitHub Token
-
-jobs:
- build:
- runs-on: ${{ inputs.runs_on }}
- timeout-minutes: 210
- env:
- PYTORCH_ROOT: ${{ inputs.PYTORCH_ROOT }}
- BUILDER_ROOT: ${{ inputs.BUILDER_ROOT }}
- PACKAGE_TYPE: ${{ inputs.PACKAGE_TYPE }}
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: ${{ inputs.DESIRED_CUDA }}
- GPU_ARCH_VERSION: ${{ inputs.GPU_ARCH_VERSION }}
- GPU_ARCH_TYPE: ${{ inputs.GPU_ARCH_TYPE }}
- DOCKER_IMAGE: ${{ inputs.DOCKER_IMAGE }}
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: ${{ inputs.LIBTORCH_CONFIG }}
- LIBTORCH_VARIANT: ${{ inputs.LIBTORCH_VARIANT }}
- DESIRED_DEVTOOLSET: ${{ inputs.DESIRED_DEVTOOLSET }}
- DESIRED_PYTHON: ${{ inputs.DESIRED_PYTHON }}
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: ${{ inputs.PYTORCH_EXTRA_INSTALL_REQUIREMENTS }}
- # Needed for conda builds
- ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }}
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: ${{ inputs.build_environment }}
- GITHUB_TOKEN: ${{ secrets.github-token }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- steps:
- - name: Make the env permanent during this workflow (but not the secrets)
- shell: bash
- run: |
- {
- echo "PYTORCH_ROOT=${{ env.PYTORCH_ROOT }}"
- echo "BUILDER_ROOT=${{ env.BUILDER_ROOT }}"
- echo "PACKAGE_TYPE=${{ env.PACKAGE_TYPE }}"
- echo "DESIRED_CUDA=${{ env.DESIRED_CUDA }}"
- echo "GPU_ARCH_VERSION=${{ env.GPU_ARCH_VERSION }}"
- echo "GPU_ARCH_TYPE=${{ env.GPU_ARCH_TYPE }}"
- echo "DOCKER_IMAGE=${{ env.DOCKER_IMAGE }}"
- echo "SKIP_ALL_TESTS=${{ env.SKIP_ALL_TESTS }}"
- echo "LIBTORCH_CONFIG=${{ env.LIBTORCH_CONFIG }}"
- echo "LIBTORCH_VARIANT=${{ env.LIBTORCH_VARIANT }}"
- echo "DESIRED_DEVTOOLSET=${{ env.DESIRED_DEVTOOLSET }}"
- echo "DESIRED_PYTHON=${{ env.DESIRED_PYTHON }}"
- echo "PYTORCH_EXTRA_INSTALL_REQUIREMENTS=${{ env.PYTORCH_EXTRA_INSTALL_REQUIREMENTS }}"
- echo "ALPINE_IMAGE=${{ env.ALPINE_IMAGE }}"
- echo "ANACONDA_USER=${{ env.ANACONDA_USER }}"
- echo "AWS_DEFAULT_REGION=${{ env.AWS_DEFAULT_REGION }}"
- echo "BINARY_ENV_FILE=${{ env.BINARY_ENV_FILE }}"
- echo "BUILD_ENVIRONMENT=${{ env.BUILD_ENVIRONMENT }}"
-          echo "BUILD_NAME=${{ inputs.build_name }}"
- echo "PR_NUMBER=${{ env.PR_NUMBER }}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- echo "SHA1=${{ env.SHA1 }}"
-        } >> "${GITHUB_ENV}"
-
- - name: List the env
- shell: bash
- run: env
-
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- if: inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.github-token }}
-
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }}
-
- - name: Setup Linux
- if: inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: ./.github/actions/setup-linux
-
- - name: Chown workspace
- if: inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: ./.github/actions/chown-workspace
- with:
- ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }}
-
- - name: Clean workspace
- shell: bash
- run: |
- set -eux
-
- rm -rf "${GITHUB_WORKSPACE}"
- mkdir "${GITHUB_WORKSPACE}"
-
- if [[ ${{ inputs.build_environment }} == 'linux-aarch64-binary-manywheel' ]] || [[ ${{ inputs.build_environment }} == 'linux-s390x-binary-manywheel' ]] ; then
- rm -rf "${RUNNER_TEMP}/artifacts"
- mkdir "${RUNNER_TEMP}/artifacts"
- fi
-
- - name: Checkout PyTorch to pytorch dir
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
-
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
-
- - name: Checkout pytorch/builder to builder dir
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
-
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
-
- - name: Check if the job is disabled
- id: filter
- uses: ./pytorch/.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- # NB: Use a mock test matrix with a default value here. After filtering, if the
- # returned matrix is empty, it means that the job is disabled
- test-matrix: |
- { include: [
- { config: "default" },
- ]}
-
- - name: Pull Docker image
- if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ inputs.DOCKER_IMAGE }}
-
- - name: Build PyTorch binary
- if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' }}
- run: |
- set -x
- mkdir -p artifacts/
- container_name=$(docker run \
- -e BINARY_ENV_FILE \
- -e BUILDER_ROOT \
- -e BUILD_ENVIRONMENT \
- -e DESIRED_CUDA \
- -e DESIRED_DEVTOOLSET \
- -e DESIRED_PYTHON \
- -e GITHUB_ACTIONS \
- -e GPU_ARCH_TYPE \
- -e GPU_ARCH_VERSION \
- -e LIBTORCH_VARIANT \
- -e PACKAGE_TYPE \
- -e PYTORCH_FINAL_PACKAGE_DIR \
- -e PYTORCH_ROOT \
- -e SKIP_ALL_TESTS \
- -e PYTORCH_EXTRA_INSTALL_REQUIREMENTS \
- --tty \
- --detach \
- -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \
- -v "${GITHUB_WORKSPACE}/builder:/builder" \
- -v "${RUNNER_TEMP}/artifacts:/artifacts" \
- -w / \
- "${DOCKER_IMAGE}"
- )
- docker exec -t -w "${PYTORCH_ROOT}" "${container_name}" bash -c "bash .circleci/scripts/binary_populate_env.sh"
- if [[ ${BUILD_ENVIRONMENT} == *"aarch64"* ]]; then
- docker exec -t "${container_name}" bash -c "bash /builder/aarch64_linux/aarch64_ci_build.sh"
- else
- docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash /builder/${{ inputs.PACKAGE_TYPE }}/build.sh"
- fi
-
- - name: Chown artifacts
- if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
- shell: bash
- run: |
- # Ensure the working directory gets chowned back to the current user
- docker run --rm -v "${RUNNER_TEMP}/artifacts:/v" -w /v "${ALPINE_IMAGE}" chown -R "$(id -u):$(id -g)" .
-
- - uses: actions/upload-artifact@v3
- if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' }}
- with:
- name: ${{ inputs.build_name }}
- if-no-files-found: error
- path:
- ${{ runner.temp }}/artifacts/*
-
- - name: Teardown Linux
- if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
-
- - name: Chown workspace
- if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: ./pytorch/.github/actions/chown-workspace
- with:
- ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }}
-
- - name: Cleanup docker
- if: always() && inputs.build_environment == 'linux-s390x-binary-manywheel'
- shell: bash
- run: |
- # on s390x stop the container for clean worker stop
- # ignore expansion of "docker ps -q" since it could be empty
- # shellcheck disable=SC2046
- docker stop $(docker ps -q) || true
diff --git a/.github/workflows/_binary-test-linux.yml b/.github/workflows/_binary-test-linux.yml
deleted file mode 100644
index 25a6b24223f99..0000000000000
--- a/.github/workflows/_binary-test-linux.yml
+++ /dev/null
@@ -1,226 +0,0 @@
-name: linux-binary-test
-
-on:
- workflow_call:
- inputs:
- build_name:
- required: true
- type: string
- description: The build's name
- build_environment:
- required: true
- type: string
- description: The build environment
- ALPINE_IMAGE:
- required: false
- type: string
- default: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- PYTORCH_ROOT:
- required: true
- type: string
- description: Root directory for the pytorch/pytorch repository
- BUILDER_ROOT:
- required: true
- type: string
- description: Root directory for the pytorch/builder repository
- PACKAGE_TYPE:
- required: true
- type: string
- description: Package type
- DESIRED_CUDA:
- required: true
- type: string
-      description: Desired CUDA version
- GPU_ARCH_VERSION:
- required: false
- type: string
- description: GPU Arch version
- GPU_ARCH_TYPE:
- required: true
- type: string
- description: GPU Arch type
- DOCKER_IMAGE:
- required: true
- type: string
- description: Docker image to use
- LIBTORCH_CONFIG:
- required: false
- type: string
- description: Desired libtorch config (for libtorch builds only)
- LIBTORCH_VARIANT:
- required: false
- type: string
- description: Desired libtorch variant (for libtorch builds only)
- DESIRED_DEVTOOLSET:
- required: false
- type: string
- description: Desired dev toolset
- DESIRED_PYTHON:
- required: false
- type: string
- description: Desired python version
- runs_on:
- required: true
- type: string
- description: Hardware to run this job on. Valid values are linux.4xlarge, linux.4xlarge.nvidia.gpu, linux.arm64.2xlarge, and linux.rocm.gpu
- secrets:
- github-token:
- required: true
-      description: GitHub Token
-
-jobs:
- test:
- runs-on: ${{ inputs.runs_on }}
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ inputs.PYTORCH_ROOT }}
- BUILDER_ROOT: ${{ inputs.BUILDER_ROOT }}
- PACKAGE_TYPE: ${{ inputs.PACKAGE_TYPE }}
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: ${{ inputs.DESIRED_CUDA }}
- GPU_ARCH_VERSION: ${{ inputs.GPU_ARCH_VERSION }}
- GPU_ARCH_TYPE: ${{ inputs.GPU_ARCH_TYPE }}
- DOCKER_IMAGE: ${{ inputs.DOCKER_IMAGE }}
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: ${{ inputs.LIBTORCH_CONFIG }}
- LIBTORCH_VARIANT: ${{ inputs.LIBTORCH_VARIANT }}
- DESIRED_DEVTOOLSET: ${{ inputs.DESIRED_DEVTOOLSET }}
- DESIRED_PYTHON: ${{ inputs.DESIRED_PYTHON }}
- # Needed for conda builds
- ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }}
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: ${{ inputs.build_environment }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- steps:
- - name: Make the env permanent during this workflow (but not the secrets)
- shell: bash
- run: |
- {
- echo "PYTORCH_ROOT=${{ env.PYTORCH_ROOT }}"
- echo "BUILDER_ROOT=${{ env.BUILDER_ROOT }}"
- echo "PACKAGE_TYPE=${{ env.PACKAGE_TYPE }}"
-
- echo "DESIRED_CUDA=${{ env.DESIRED_CUDA }}"
- echo "GPU_ARCH_VERSION=${{ env.GPU_ARCH_VERSION }}"
- echo "GPU_ARCH_TYPE=${{ env.GPU_ARCH_TYPE }}"
- echo "DOCKER_IMAGE=${{ env.DOCKER_IMAGE }}"
- echo "SKIP_ALL_TESTS=${{ env.SKIP_ALL_TESTS }}"
- echo "LIBTORCH_CONFIG=${{ env.LIBTORCH_CONFIG }}"
- echo "LIBTORCH_VARIANT=${{ env.LIBTORCH_VARIANT }}"
- echo "DESIRED_DEVTOOLSET=${{ env.DESIRED_DEVTOOLSET }}"
- echo "DESIRED_PYTHON=${{ env.DESIRED_PYTHON }}"
-
- echo "ALPINE_IMAGE=${{ env.ALPINE_IMAGE }}"
- echo "ANACONDA_USER=${{ env.ANACONDA_USER }}"
- echo "AWS_DEFAULT_REGION=${{ env.AWS_DEFAULT_REGION }}"
- echo "BINARY_ENV_FILE=${{ env.BINARY_ENV_FILE }}"
- echo "BUILD_ENVIRONMENT=${{ env.BUILD_ENVIRONMENT }}"
- echo "PR_NUMBER=${{ env.PR_NUMBER }}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- echo "SHA1=${{ env.SHA1 }}"
-        } >> "${GITHUB_ENV}"
-
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- if: inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.github-token }}
-
- # Setup the environment
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }}
-
- - name: Setup Linux
- if: inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: ./.github/actions/setup-linux
-
- - name: Chown workspace
- if: inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: ./.github/actions/chown-workspace
- with:
- ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }}
-
- - name: Clean workspace
- shell: bash
- run: |
- rm -rf "${GITHUB_WORKSPACE}"
- mkdir "${GITHUB_WORKSPACE}"
-
- - name: Checkout PyTorch to pytorch dir
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
-
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
-
- - name: Checkout pytorch/builder to builder dir
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
-
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
-
- - name: Check if the job is disabled
- id: filter
- uses: ./pytorch/.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- # NB: Use a mock test matrix with a default value here. After filtering, if the
- # returned matrix is empty, it means that the job is disabled
- test-matrix: |
- { include: [
- { config: "default" },
- ]}
-
- - name: Download Build Artifacts
- if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' }}
- uses: actions/download-artifact@v3
- with:
- name: ${{ inputs.build_name }}
- path: "${{ runner.temp }}/artifacts/"
-
- - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
- uses: pytorch/test-infra/.github/actions/setup-nvidia@main
- if: ${{ inputs.GPU_ARCH_TYPE == 'cuda' && steps.filter.outputs.is-test-matrix-empty == 'False' }}
-
- - name: Pull Docker image
- if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }}
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ inputs.DOCKER_IMAGE }}
-
-    - name: Test PyTorch binary
- if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' }}
- uses: ./pytorch/.github/actions/test-pytorch-binary
-
- - name: Teardown Linux
- if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
-
- - name: Chown workspace
- if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel'
- uses: ./pytorch/.github/actions/chown-workspace
- with:
- ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }}
diff --git a/.github/workflows/_binary-upload.yml b/.github/workflows/_binary-upload.yml
deleted file mode 100644
index 1231dd0e8c7d4..0000000000000
--- a/.github/workflows/_binary-upload.yml
+++ /dev/null
@@ -1,157 +0,0 @@
-name: upload
-
-on:
- workflow_call:
- inputs:
- build_name:
- required: true
- type: string
- description: The build's name
- use_s3:
- type: boolean
- default: true
-      description: If true, download artifacts from S3; otherwise use the default GitHub artifact download action
- PYTORCH_ROOT:
- required: false
- type: string
-      description: Root directory for the pytorch/pytorch repository. Not actually needed, but passed in because all binary build reusable workflows receive the same inputs
- BUILDER_ROOT:
- required: false
- type: string
-      description: Root directory for the pytorch/builder repository. Not actually needed, but passed in because all binary build reusable workflows receive the same inputs
- PACKAGE_TYPE:
- required: true
- type: string
- description: Package type
- DESIRED_CUDA:
- required: true
- type: string
- description: Desired CUDA version
- GPU_ARCH_VERSION:
- required: false
- type: string
- description: GPU Arch version
- GPU_ARCH_TYPE:
- required: true
- type: string
- description: GPU Arch type
- DOCKER_IMAGE:
- required: false
- type: string
- description: Docker image to use
- LIBTORCH_CONFIG:
- required: false
- type: string
- description: Desired libtorch config (for libtorch builds only)
- LIBTORCH_VARIANT:
- required: false
- type: string
- description: Desired libtorch variant (for libtorch builds only)
- DESIRED_DEVTOOLSET:
- required: false
- type: string
- description: Desired dev toolset
- DESIRED_PYTHON:
- required: false
- type: string
- description: Desired python version
- secrets:
- github-token:
- required: true
-      description: GitHub Token
- conda-pytorchbot-token:
- required: true
- description: Conda PyTorchBot token
- conda-pytorchbot-token-test:
- required: true
-      description: Conda PyTorchBot token for the test channel
-
-jobs:
- upload:
- runs-on: ubuntu-22.04
- environment: ${{ (github.event_name == 'push' && (github.event.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/v'))) && 'conda-aws-upload' || '' }}
- container:
- image: continuumio/miniconda3:4.12.0
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: ${{ inputs.PACKAGE_TYPE }}
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: ${{ inputs.DESIRED_CUDA }}
- GPU_ARCH_VERSION: ${{ inputs.GPU_ARCH_VERSION }}
- GPU_ARCH_TYPE: ${{ inputs.GPU_ARCH_TYPE }}
- DOCKER_IMAGE: ${{ inputs.DOCKER_IMAGE }}
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: ${{ inputs.LIBTORCH_CONFIG }}
- LIBTORCH_VARIANT: ${{ inputs.LIBTORCH_VARIANT }}
- DESIRED_DEVTOOLSET: ${{ inputs.DESIRED_DEVTOOLSET }}
- DESIRED_PYTHON: ${{ inputs.DESIRED_PYTHON }}
- ANACONDA_USER: pytorch
- BINARY_ENV_FILE: /tmp/env
- GITHUB_TOKEN: ${{ secrets.github-token }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- no-sudo: true
-
- - name: Configure AWS credentials(PyTorch account) for nightly
- if: ${{ github.event_name == 'push' && github.event.ref == 'refs/heads/nightly' }}
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: arn:aws:iam::749337293305:role/gha_workflow_nightly_build_wheels
- aws-region: us-east-1
-
- - name: Configure AWS credentials(PyTorch account) for RC builds
- if: ${{ github.event_name == 'push' && (startsWith(github.event.ref, 'refs/tags/') && !startsWith(github.event.ref, 'refs/tags/ciflow/')) }}
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: arn:aws:iam::749337293305:role/gha_workflow_test_build_wheels
- aws-region: us-east-1
-
- - name: Download Build Artifacts
- id: download-artifacts
- # NB: When the previous build job is skipped, there won't be any artifacts and
- # this step will fail. Binary build jobs can only be skipped on CI, not nightly
- continue-on-error: true
- uses: actions/download-artifact@v3
- with:
- name: ${{ inputs.build_name }}
- path: "${{ runner.temp }}/artifacts/"
-
-    - name: Set DRY_RUN (only for nightly and tagged pushes)
- if: ${{ github.event_name == 'push' && (github.event.ref == 'refs/heads/nightly' || (startsWith(github.event.ref, 'refs/tags/') && !startsWith(github.event.ref, 'refs/tags/ciflow/'))) }}
- run: |
- echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
-
- - name: Set UPLOAD_CHANNEL (only for tagged pushes)
- if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/') && !startsWith(github.event.ref, 'refs/tags/ciflow/') }}
- shell: bash -e -l {0}
- run: |
- # reference ends with an RC suffix
- if [[ "${GITHUB_REF_NAME}" = *-rc[0-9]* ]]; then
- echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
- fi
-
- - name: Upload binaries
- if: steps.download-artifacts.outcome && steps.download-artifacts.outcome == 'success'
- shell: bash
- env:
- PKG_DIR: "${{ runner.temp }}/artifacts"
- UPLOAD_SUBFOLDER: "${{ env.DESIRED_CUDA }}"
- # When running these on pull_request events these should be blank
- CONDA_PYTORCHBOT_TOKEN: ${{ secrets.conda-pytorchbot-token }}
- CONDA_PYTORCHBOT_TOKEN_TEST: ${{ secrets.conda-pytorchbot-token-test }}
- BUILD_NAME: ${{ inputs.build_name }}
- run: |
- set -ex
- if [[ "${GITHUB_REF_NAME}" = *-rc[0-9]* ]]; then
- export ANACONDA_API_TOKEN="${CONDA_PYTORCHBOT_TOKEN_TEST}"
- else
- export ANACONDA_API_TOKEN="${CONDA_PYTORCHBOT_TOKEN}"
- fi
- bash .circleci/scripts/binary_upload.sh
diff --git a/.github/workflows/_buck-build-test.yml b/.github/workflows/_buck-build-test.yml
deleted file mode 100644
index 43eb72fc9181b..0000000000000
--- a/.github/workflows/_buck-build-test.yml
+++ /dev/null
@@ -1,129 +0,0 @@
-name: buck
-
-on:
- workflow_call:
- inputs:
- test-matrix:
- required: true
- type: string
- description: |
- A JSON description of what configs to run later on.
-
-defaults:
- run:
- shell: bash -e -l {0}
-
-jobs:
- filter:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
- keep-going: ${{ steps.filter.outputs.keep-going }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
-
- - name: Select all requested test configurations
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
-
- buck-build-test:
- needs: filter
- if: github.repository_owner == 'pytorch' && needs.filter.outputs.is-test-matrix-empty == 'False'
- strategy:
- matrix: ${{ fromJSON(needs.filter.outputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Set up JDK 8
- uses: actions/setup-java@v3
- with:
- java-version: '8'
- distribution: 'temurin'
-
- - name: Setup miniconda
- uses: pytorch/test-infra/.github/actions/setup-miniconda@main
- with:
- python-version: 3.8
- environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
-
- - name: Install Buck
- uses: nick-fields/retry@3e91a01664abd3c5cd539100d10d33b9c5b68482
- with:
- timeout_minutes: 10
- max_attempts: 5
- command: |
- sudo apt update -q
- wget -q https://github.com/facebook/buck/releases/download/v2021.01.12.01/buck.2021.01.12.01_all.deb
- sudo apt install ./buck.2021.01.12.01_all.deb
-
- - name: Download third party libraries and generate wrappers
- uses: nick-fields/retry@3e91a01664abd3c5cd539100d10d33b9c5b68482
- with:
- timeout_minutes: 10
- max_attempts: 5
- command: |
- bash scripts/buck_setup.sh
-
- - name: Build tools
- run: |
- buck build tools: --keep-going
-
- - name: Run tools tests
- run: |
- buck test tools:selective_build_test tools:gen_oplist_test tools:gen_operators_yaml_test
-
- - name: Build c10
- run: |
- buck build c10:c10
-
- - name: Build XNNPACK
- run: |
- buck build third_party:XNNPACK
-
- - name: Build QNNPACK
- run: |
- buck build aten/src/ATen/native/quantized/cpu/qnnpack:pytorch_qnnpack
-
- - name: Test QNNPACK
- run: |
- buck test aten/src/ATen/native/quantized/cpu/qnnpack:pytorch_qnnpack_test
-
- - name: Build aten_cpu
- run: |
- buck build :aten_cpu
-
- - name: Build torch_mobile_core
- run: |
- buck build :torch_mobile_core
-
- - name: Build pt_ops_full
- run: |
- buck build :pt_ops_full
-
- - name: Build mobile benchmark
- run: |
- buck build :ptmobile_benchmark
-
- - name: Run lite interpreter model
- run: |
- buck run :ptmobile_benchmark -- --model=ios/TestApp/models/mobilenet_v2.ptl --input_dims=1,3,224,224 --input_type=float
-
- - name: Build everything
- run: |
- buck build //... --keep-going
-
- - name: Build aten_cpu@shared
- run: |
- buck build :aten_cpu#linux-x86_64,shared
diff --git a/.github/workflows/_docs.yml b/.github/workflows/_docs.yml
deleted file mode 100644
index 069bcb4d2a14e..0000000000000
--- a/.github/workflows/_docs.yml
+++ /dev/null
@@ -1,222 +0,0 @@
-name: build docs
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- docker-image:
- required: true
- type: string
- description: Docker image to run in.
- push:
- required: false
- type: boolean
- default: false
- description: If set, push the docs to the docs website.
- run-doxygen:
- required: false
- type: boolean
- default: false
- description: If set, will enable C++ API doc generation using doxygen / breathe / exhale.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- s3-bucket:
- description: S3 bucket to download artifact
- required: false
- type: string
- default: "gha-artifacts"
- aws-role-to-assume:
- description: role to assume for downloading artifacts
- required: false
- type: string
- default: ""
- upload-aws-role-to-assume:
- description: role to assume for uploading artifacts
- required: false
- type: string
- default: ""
- secrets:
- GH_PYTORCHBOT_TOKEN:
- required: false
- description: Permissions for pushing to the docs site.
-
-jobs:
- build-docs:
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch'
- runs-on: ${{ matrix.runner }}
- environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'pytorchbot-env' || '' }}
- strategy:
- fail-fast: false
- matrix:
- include:
- - docs_type: cpp
- # We have recently been seeing lots of exit code 137 errors when running this in Docker,
- # indicating an OOM issue, so this upgrades the runner from 4xlarge to the next
- # available tier, 12xlarge. That's a lot of memory just to generate the C++ docs.
- runner: linux.12xlarge
- # TODO: Nightly cpp docs take longer and longer to finish (more than 3h now)
- # Let's try to figure out how this can be improved
- timeout-minutes: 240
- - docs_type: python
- runner: linux.2xlarge
- # It takes less than 30m to finish python docs unless there are issues
- timeout-minutes: 30
- - docs_type: functorch
- runner: linux.2xlarge
- # It takes less than 15m to finish functorch docs unless there are issues
- timeout-minutes: 15
- # Set a fixed name for this job instead of the matrix-generated name, e.g. build-docs (cpp, linux.12xlarge, 180).
- # The matrix-generated name would require updating the Rockset last-docs-push query in test-infra every time the matrix changes
- name: build-docs-${{ matrix.docs_type }}-${{ inputs.push }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- instructions: |
- All builds are done inside the container. To start an interactive session run:
- docker exec -it $(docker container ps --format '{{.ID}}') bash
- To start the Python docs build, type:
- cd docs && make html && make coverage
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: configure aws credentials
- if: ${{ inputs.aws-role-to-assume != '' }}
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: ${{ inputs.aws-role-to-assume }}
- role-session-name: gha-linux-test
- aws-region: us-east-1
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image }}
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Download build artifacts
- uses: ./.github/actions/download-build-artifacts
- with:
- name: ${{ inputs.build-environment }}
- s3-bucket: ${{ inputs.s3-bucket }}
-
- - name: Generate netrc (only for docs-push)
- if: inputs.push
- env:
- GITHUB_PYTORCHBOT_TOKEN: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
- run: |
- # sometimes .netrc exists as a directory even though this is the temp folder
- rm -rf "${RUNNER_TEMP}/.netrc"
- # set credentials for https pushing
- echo "machine github.com" > "${RUNNER_TEMP}/.netrc"
- echo "login pytorchbot" >> "${RUNNER_TEMP}/.netrc"
- echo "password ${GITHUB_PYTORCHBOT_TOKEN}" >> "${RUNNER_TEMP}/.netrc"
-
- - name: Build ${{ matrix.docs_type }} docs
- timeout-minutes: ${{ matrix.timeout-minutes }}
- id: build-docs
- env:
- # After https://github.com/pytorch/pytorch/pull/88373, the pull workflow can now be run periodically,
- # so using a schedule event to determine whether the docs should be pushed no longer
- # holds true
- WITH_PUSH: ${{ inputs.push }}
- DOCKER_IMAGE: ${{ inputs.docker-image }}
- DOCS_TYPE: ${{ matrix.docs_type }}
- RUN_DOXYGEN: ${{ inputs.run-doxygen }}
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- run: |
- set -ex
- # Convert refs/tags/v1.12.0rc3 into 1.12
- if [[ "${GITHUB_REF}" =~ ^refs/tags/v([0-9]+\.[0-9]+)\.* ]]; then
- target="${BASH_REMATCH[1]}"
- else
- target="main"
- fi
- # detached container should get cleaned up by teardown_ec2_linux
- container_name=$(docker run \
- -e BUILD_ENVIRONMENT \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e SHA1="$GITHUB_SHA" \
- -e DOCS_VERSION="${target}" \
- -e DOCS_TYPE \
- -e RUN_DOXYGEN \
- -e WITH_PUSH \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --tty \
- --detach \
- --user jenkins \
- -v "${RUNNER_TEMP}/.netrc":/var/lib/jenkins/.netrc \
- -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
- -w /var/lib/jenkins/workspace \
- "${DOCKER_IMAGE}"
- )
- docker exec -t "${container_name}" bash -c "sudo chown -R jenkins . && pip install $(echo dist/*.whl)[opt-einsum] && ./.ci/pytorch/${DOCS_TYPE}_doc_push_script.sh"
-
- - name: Chown workspace
- uses: ./.github/actions/chown-workspace
- if: always()
-
- - name: configure aws credentials
- if: ${{ inputs.upload-aws-role-to-assume != '' }}
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: ${{ inputs.upload-aws-role-to-assume }}
- role-session-name: gha-linux-test
- aws-region: us-east-1
-
- - name: Upload Python Docs Preview
- uses: seemethere/upload-artifact-s3@v5
- if: ${{ github.event_name == 'pull_request' && matrix.docs_type == 'python' && steps.build-docs.outcome == 'success' }}
- with:
- retention-days: 14
- s3-bucket: doc-previews
- if-no-files-found: error
- path: pytorch_docs/main/
- s3-prefix: pytorch/pytorch/${{ github.event.pull_request.number }}
-
- - name: Upload C++ Docs Preview
- uses: seemethere/upload-artifact-s3@v5
- if: ${{ github.event_name == 'pull_request' && matrix.docs_type == 'cpp' && steps.build-docs.outcome == 'success' }}
- with:
- retention-days: 14
- if-no-files-found: error
- s3-bucket: doc-previews
- path: cppdocs/
- s3-prefix: pytorch/pytorch/${{ github.event.pull_request.number }}/cppdocs
-
- - name: Upload functorch Docs Preview
- uses: seemethere/upload-artifact-s3@v5
- if: ${{ github.event_name == 'pull_request' && matrix.docs_type == 'functorch' && steps.build-docs.outcome == 'success' }}
- with:
- retention-days: 14
- s3-bucket: doc-previews
- if-no-files-found: error
- path: functorch_ghpages/nightly/
- s3-prefix: pytorch/pytorch/${{ github.event.pull_request.number }}/functorchdocs
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
diff --git a/.github/workflows/_ios-build-test.yml b/.github/workflows/_ios-build-test.yml
deleted file mode 100644
index 0282a0482104d..0000000000000
--- a/.github/workflows/_ios-build-test.yml
+++ /dev/null
@@ -1,464 +0,0 @@
-name: ios-build-test
-
-on:
- workflow_call:
- inputs:
- trigger-event:
- type: string
- default: ""
- description: |
- The trigger event from the caller that determines whether or not to upload
- build-environment:
- required: true
- type: string
- description: Top-level label for what is being built/tested.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- test-matrix:
- required: true
- type: string
- description: |
- A JSON description of what configs to run later on.
- secrets:
- AWS_PYTORCH_MOBILE_UPLOADER_ACCESS_KEY_ID:
- required: false
- AWS_PYTORCH_MOBILE_UPLOADER_SECRET_ACCESS_KEY:
- required: false
- COCOAPODS_TRUNK_TOKEN:
- required: false
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
-
-jobs:
- filter:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
- keep-going: ${{ steps.filter.outputs.keep-going }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
-
- - name: Select all requested test configurations
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
-
- build:
- needs: filter
- # Don't run on forked repos
- if: github.repository_owner == 'pytorch' && needs.filter.outputs.is-test-matrix-empty == 'False'
- strategy:
- matrix: ${{ fromJSON(needs.filter.outputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- env:
- IOS_PLATFORM: ${{ matrix.ios_platform }}
- IOS_ARCH: ${{ matrix.ios_arch }}
- BUILD_LITE_INTERPRETER: ${{ matrix.use_lite_interpreter }}
- USE_PYTORCH_METAL: ${{ matrix.use_metal }}
- USE_COREML_DELEGATE: ${{ matrix.use_coreml }}
- CUSTOM_OP_LIST: ${{ matrix.use_custom_op_list }}
- # TODO: Bump it to 2.2.0 after cherry-picking this, or figure out a better way
- # to get this version instead of hard-coding it here
- PYTORCH_VERSION: 2.1.0
- timeout-minutes: 240
- steps:
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Populate CI build options
- shell: bash
- run: |
- set -ex
-
- if [ -n "${CUSTOM_OP_LIST:-}" ]; then
- echo "SELECTED_OP_LIST=${GITHUB_WORKSPACE}/ios/TestApp/custom_build/${CUSTOM_OP_LIST}" >> "${GITHUB_ENV}"
- fi
-
- - name: Install brew dependencies
- uses: nick-fields/retry@v2.8.2
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- # Install dependencies
- brew install libtool
-
- - name: Setup miniconda for iOS
- uses: pytorch/test-infra/.github/actions/setup-miniconda@main
- with:
- python-version: "3.9"
- environment-file: .github/requirements/conda-env-iOS.txt
- pip-requirements-file: .github/requirements/pip-requirements-iOS.txt
-
- - name: Setup Fastlane
- uses: nick-fields/retry@v2.8.2
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- set -x
-
- pushd ios/TestApp
- # Install fastlane
- sudo gem install bundler && bundle install
- bundle update fastlane
- popd
-
- - name: Build PyTorch mobile runtime
- shell: bash
- run: |
- set -eux
- # shellcheck disable=SC1091
- export TCLLIBPATH="/usr/local/lib"
- ${CONDA_RUN} scripts/build_ios.sh
-
- - name: Prepare the test models
- shell: bash
- working-directory: ${{ github.workspace }}/ios/TestApp/benchmark
- run: |
- set -eux
- # shellcheck disable=SC1091
- # Use the pytorch nightly build to generate models
- ${CONDA_RUN} pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu
-
- # Generate models for different backends
- mkdir -p ../models
- # NB: Both of the following scripts only export models with lite interpreter
- if [ "${USE_COREML_DELEGATE}" == 1 ]; then
- ${CONDA_RUN} python coreml_backend.py
- else
- pushd "${GITHUB_WORKSPACE}"
- ${CONDA_RUN} python test/mobile/model_test/gen_test_model.py ios-test
- popd
- fi
-
- if [ "${BUILD_LITE_INTERPRETER}" == 1 ]; then
- echo "Setting up the TestApp for LiteInterpreter"
- ruby setup.rb --lite 1
- else
- # Generate some models for JIT without lite interpreter
- ${CONDA_RUN} python trace_model.py
-
- echo "Setting up the TestApp for Full JIT"
- ruby setup.rb
- fi
-
- - name: Build TestApp
- if: matrix.ios_platform == 'SIMULATOR'
- timeout-minutes: 15
- shell: bash
- run: |
- set -eux
-
- # Run the ruby build script
- if ! [ -x "$(command -v xcodebuild)" ]; then
- echo 'Error: xcodebuild is not installed.'
- exit 1
- fi
- ruby scripts/xcode_build.rb -i build_ios/install -x ios/TestApp/TestApp.xcodeproj -p "${IOS_PLATFORM}"
-
- - name: Run simulator tests
- if: matrix.ios_platform == 'SIMULATOR'
- shell: bash
- working-directory: ${{ github.workspace }}/ios/TestApp
- run: |
- set -eux
-
- # Instruments -s -devices
- if [ "${BUILD_LITE_INTERPRETER}" == 1 ]; then
- if [ "${USE_COREML_DELEGATE}" == 1 ]; then
- bundle exec fastlane scan --only_testing TestAppTests/TestAppTests/testCoreML
- else
- bundle exec fastlane scan --skip_testing TestAppTests/TestAppTests/testCoreML
- fi
- else
- bundle exec fastlane scan --only_testing TestAppTests/TestAppTests/testFullJIT
- fi
-
- - name: Dump simulator tests on failure
- if: failure() && matrix.ios_platform == 'SIMULATOR'
- run: |
- echo "Simulator Tests Logs:"
- cat /Users/runner/Library/Logs/scan/*.log
-
- - name: Prepare the build artifacts for upload
- if: matrix.ios_platform == 'OS'
- shell: bash
- run: |
- set -eux
-
- # The structure of the folder is as follows:
- #
- # RUNNER_TEMP/
- # └── IOS_ARCH/
- # ├── LICENSE
- # ├── install
- # │ ├── include
- # │ │ └── headers
- # │ └── lib
- # │ ├── libXNNPACK.a
- # │ ├── libc10.a
- # │ ├── libclog.a
- # │ ├── libcpuinfo.a
- # │ ├── libeigen_blas.a
- # │ ├── libpthreadpool.a
- # │ ├── libpytorch_qnnpack.a
- # │ ├── libtorch.a
- # │ └── libtorch_cpu.a
- # ├── src
- # │ └── LibTorch-Lite.h
- # └── version.txt
- SETUP_DIR="${RUNNER_TEMP}/${IOS_ARCH}"
- mkdir -p "${SETUP_DIR}/src"
-
- cp -R "${GITHUB_WORKSPACE}/build_ios/install" "${SETUP_DIR}"
- # Copy the umbrella header and license
- if [ "${BUILD_LITE_INTERPRETER}" == 1 ]; then
- cp "${GITHUB_WORKSPACE}/ios/LibTorch-Lite.h" "${SETUP_DIR}/src"
- else
- cp "${GITHUB_WORKSPACE}/ios/LibTorch.h" "${SETUP_DIR}/src"
- fi
-
- # Copy license and version
- cp "${GITHUB_WORKSPACE}/LICENSE" "${SETUP_DIR}"
- echo "${PYTORCH_VERSION}" > "${SETUP_DIR}"/version.txt
-
- # Save the podspec for the upload job later
- if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then
- DATE=$(date -u +%Y%m%d)
- cp "${GITHUB_WORKSPACE}"/ios/LibTorch-Lite-Nightly.podspec.template "${SETUP_DIR}"/LibTorch-Lite-Nightly.podspec
- sed -i '' -e "s/IOS_NIGHTLY_BUILD_VERSION/${PYTORCH_VERSION}.${DATE}/g" "${SETUP_DIR}"/LibTorch-Lite-Nightly.podspec
-
- cp "${GITHUB_WORKSPACE}"/ios/LibTorch-Lite.podspec.template "${SETUP_DIR}"/LibTorch-Lite.podspec
- sed -i '' -e "s/IOS_BUILD_VERSION/${PYTORCH_VERSION}/g" "${SETUP_DIR}"/LibTorch-Lite.podspec
- else
- # NB: There is no nightly build without lite interpreter atm
- cp "${GITHUB_WORKSPACE}"/ios/LibTorch.podspec.template "${SETUP_DIR}"/LibTorch.podspec
- sed -i '' -e "s/IOS_BUILD_VERSION/${PYTORCH_VERSION}/g" "${SETUP_DIR}"/LibTorch.podspec
- fi
-
- pushd "${SETUP_DIR}"
- # NB: It's important to zip all the files before uploading because GHA uploads
- # files sequentially, which is slow and generates too many requests. More info is at
- # https://github.com/actions/upload-artifact#too-many-uploads-resulting-in-429-responses
- zip -r "${IOS_ARCH}.zip" install src version.txt LICENSE ./*.podspec
- popd
-
- - uses: actions/upload-artifact@v3
- if: matrix.ios_platform == 'OS'
- with:
- name: pytorch-ios-build-artifacts-${{ matrix.ios_arch }}
- if-no-files-found: error
- path: ${{ runner.temp }}/${{ matrix.ios_arch }}/${{ matrix.ios_arch }}.zip
-
- upload-ios-artifacts:
- # NB: this job runs on a GitHub-hosted ephemeral runner so that it can access AWS credentials
- runs-on: ubuntu-22.04
- needs: build
- # NB: Only upload release builds; if needed, we could also turn on nightly uploads here
- environment: ${{ ((inputs.trigger-event == 'push' || inputs.trigger-event == 'workflow_dispatch') && (github.event.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/v'))) && 'ios-upload' || '' }}
- steps:
- - uses: actions/checkout@v3
-
- # For awscli S3 upload
- - uses: actions/setup-python@v4
- with:
- python-version: '3.10'
- cache: pip
-
- # For cocoapods pod upload
- - uses: ruby/setup-ruby@v1
- with:
- ruby-version: '3.2'
- bundler-cache: true
-
- - name: Download arm64 artifacts
- uses: actions/download-artifact@v3
- with:
- name: pytorch-ios-build-artifacts-arm64
-
- - name: Unzip artifacts
- shell: bash
- run: |
- set -eux
-
- ARCH="arm64"
- TMP_DIR="${RUNNER_TEMP}/${ARCH}"
- mkdir -p "${TMP_DIR}"
-
- cp "${ARCH}.zip" "${TMP_DIR}"
-
- pushd "${TMP_DIR}"
- unzip -o "${ARCH}.zip"
- popd
-
- - name: Prepare the artifact
- env:
- IS_NIGHTLY: ${{ github.event.ref == 'refs/heads/nightly' }}
- shell: bash
- working-directory: ${{ runner.temp }}/arm64
- run: |
- set -eux
-
- DEST_DIR="${RUNNER_TEMP}"/ios
- echo "DEST_DIR=${DEST_DIR}" >> "$GITHUB_ENV"
-
- # Prepare all the sub directories
- mkdir -p "${DEST_DIR}"/install/lib
-
- # Copy header and share files
- cp -R install/include "${DEST_DIR}"/install
- cp -R install/share "${DEST_DIR}"/install
- # The last dash is important to copy only files under src
- cp -R src "${DEST_DIR}"
- cp LICENSE "${DEST_DIR}"
-
- if [ "${IS_NIGHTLY}" == true ]; then
- PYTORCH_VERSION=$(cat version.txt)
- DATE=$(date -u +%Y%m%d)
- echo "${PYTORCH_VERSION}.${DATE}" > "${DEST_DIR}"/version.txt
- else
- cp version.txt "${DEST_DIR}"
- fi
- PYTORCH_VERSION=$(cat "${DEST_DIR}"/version.txt)
- echo "PYTORCH_VERSION=${PYTORCH_VERSION}" >> "$GITHUB_ENV"
-
- pushd install/lib
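- # collect the names of all static libraries (*.a) built for this architecture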
- # shellcheck disable=SC2207
- LIBRARIES=($(ls ./*.a))
- popd
-
- for LIB in "${LIBRARIES[@]}"; do
- cp "${RUNNER_TEMP}"/arm64/install/lib/"${LIB}" "${DEST_DIR}"/install/lib/"${LIB}"
- done
-
- BUILD_LITE_INTERPRETER=1
- if [ -f "${RUNNER_TEMP}"/arm64/LibTorch.podspec ]; then
- # If LibTorch.podspec is used instead of LibTorch-Lite.podspec, the artifact is built
- # without lite interpreter
- BUILD_LITE_INTERPRETER=0
- fi
- echo "BUILD_LITE_INTERPRETER=${BUILD_LITE_INTERPRETER}" >> "$GITHUB_ENV"
-
- - name: Prepare the podspec
- env:
- IS_NIGHTLY: ${{ github.event.ref == 'refs/heads/nightly' }}
- shell: bash
- working-directory: ${{ env.DEST_DIR }}
- run: |
- set -eux
-
- ARTIFACT_NAME=libtorch
- SPEC_NAME=LibTorch
-
- if [ "${BUILD_LITE_INTERPRETER}" == "1" ]; then
- ARTIFACT_NAME="${ARTIFACT_NAME}_lite_ios"
- SPEC_NAME="${SPEC_NAME}-Lite"
- else
- ARTIFACT_NAME="${ARTIFACT_NAME}_ios"
- fi
-
- if [ "${IS_NIGHTLY}" == true ]; then
- ARTIFACT_NAME="${ARTIFACT_NAME}_nightly_${PYTORCH_VERSION}.zip"
- SPEC_NAME="${SPEC_NAME}-Nightly"
- else
- ARTIFACT_NAME="${ARTIFACT_NAME}_${PYTORCH_VERSION}.zip"
- fi
-
- SPEC_NAME_WITH_VERSION="${SPEC_NAME}-${PYTORCH_VERSION}.podspec"
- SPEC_NAME="${SPEC_NAME}.podspec"
-
- # Also copy the spec file
- cp "${RUNNER_TEMP}"/arm64/"${SPEC_NAME}" "${SPEC_NAME_WITH_VERSION}"
-
- # NB: It's important to zip all the files before uploading because GHA uploads
- # files sequentially, which is slow and generates too many requests. More info is at
- # https://github.com/actions/upload-artifact#too-many-uploads-resulting-in-429-responses
- zip -r "${ARTIFACT_NAME}" install src version.txt LICENSE
-
- {
- echo "ARTIFACT_NAME=${ARTIFACT_NAME}"
- echo "SPEC_NAME_WITH_VERSION=${SPEC_NAME_WITH_VERSION}"
- echo "SPEC_NAME=${SPEC_NAME}"
- } >> "$GITHUB_ENV"
-
- - uses: actions/upload-artifact@v3
- with:
- name: pytorch-ios-artifacts
- if-no-files-found: error
- path: ${{ env.DEST_DIR }}/${{ env.ARTIFACT_NAME }}
-
- - uses: actions/upload-artifact@v3
- with:
- name: pytorch-ios-podspec
- if-no-files-found: error
- path: ${{ env.DEST_DIR }}/${{ env.SPEC_NAME_WITH_VERSION }}
-
- - name: Set DRY_RUN
- if: ${{ (inputs.trigger-event == 'push' || inputs.trigger-event == 'workflow_dispatch') && (github.event.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/v')) }}
- shell: bash
- run: |
- echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
-
- - name: Upload the artifact to S3
- env:
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_MOBILE_UPLOADER_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_MOBILE_UPLOADER_SECRET_ACCESS_KEY }}
- IS_NIGHTLY: ${{ github.event.ref == 'refs/heads/nightly' }}
- shell: bash
- working-directory: ${{ env.DEST_DIR }}
- run: |
- set -eux
-
- pip install -q awscli==1.29.40
-
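- # DRY_RUN stays enabled (aws s3 cp --dryrun) unless the Set DRY_RUN step above disabled it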
- DRY_RUN=${DRY_RUN:-enabled}
- AWS_S3_CP="aws s3 cp --dryrun"
- if [ "${DRY_RUN}" == "disabled" ]; then
- AWS_S3_CP="aws s3 cp"
- fi
-
- if [ "${IS_NIGHTLY}" == true ]; then
- BUCKET_NAME="ossci-ios-build"
- else
- BUCKET_NAME="ossci-ios"
- fi
-
- ${AWS_S3_CP} "${ARTIFACT_NAME}" "s3://${BUCKET_NAME}/" --acl public-read
- ${AWS_S3_CP} "${SPEC_NAME_WITH_VERSION}" "s3://${BUCKET_NAME}/" --acl public-read
-
- - name: Upload the artifact to cocoapods (nightly only)
- env:
- # We need to set this secret to upload to cocoapods. However, we might want
- # to NOT set this for PROD release so that we can upload the artifacts manually
- COCOAPODS_TRUNK_TOKEN: ${{ secrets.COCOAPODS_TRUNK_TOKEN || '' }}
- if: ${{ (inputs.trigger-event == 'push' || inputs.trigger-event == 'workflow_dispatch') && github.event.ref == 'refs/heads/nightly' && env.COCOAPODS_TRUNK_TOKEN != '' }}
- shell: bash
- working-directory: ${{ runner.temp }}/arm64
- run: |
- set -eux
-
- gem install cocoapods
-
- pod trunk me
- # Upload the spec to cocoapods
- pod trunk push --verbose --allow-warnings --use-libraries --skip-import-validation "${SPEC_NAME}"
diff --git a/.github/workflows/_linux-build-label.yml b/.github/workflows/_linux-build-label.yml
deleted file mode 100644
index 427f993b48530..0000000000000
--- a/.github/workflows/_linux-build-label.yml
+++ /dev/null
@@ -1,109 +0,0 @@
-name: linux-build
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- docker-image-name:
- required: true
- type: string
- description: Name of the base docker image to build with.
- build-generates-artifacts:
- required: false
- type: boolean
- default: true
- description: If set, upload generated build artifacts.
- build-with-debug:
- required: false
- type: boolean
- default: false
- description: If set, build in debug mode.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- cuda-arch-list:
- required: false
- type: string
- default: "5.2"
- description: List of CUDA architectures CI build should target.
- runner:
- required: false
- type: string
- default: "linux.2xlarge"
- description: Runner label to select worker type
- test-matrix:
- required: false
- type: string
- description: |
- An optional JSON description of what test configs to run later on. This
- is moved here from the Linux test workflow so that we can apply filter
- logic using test-config labels earlier and skip unnecessary builds
- s3-bucket:
- description: S3 bucket to download artifact
- required: false
- type: string
- default: "gha-artifacts"
- aws-role-to-assume:
- description: role to assume for downloading artifacts
- required: false
- type: string
- default: ""
- secrets:
- HUGGING_FACE_HUB_TOKEN:
- required: false
- description: |
- HF Auth token to avoid rate limits when downloading models or datasets from hub
-
- outputs:
- docker-image:
- value: ${{ jobs.build.outputs.docker-image }}
- description: The docker image containing the built PyTorch.
- test-matrix:
- value: ${{ jobs.build.outputs.test-matrix }}
- description: An optional JSON description of what test configs to run later on.
-
-jobs:
- build:
- # Don't run on forked repos
- if: github.repository_owner == 'pytorch'
- runs-on: ${{ inputs.runner }}
- timeout-minutes: 240
- outputs:
- docker-image: ${{ steps.linux-build.outputs.docker-image }}
- test-matrix: ${{ steps.linux-build.outputs.test-matrix }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
-
- # [pytorch repo ref]
- # Use a pytorch/pytorch reference instead of a reference to the local
- # checkout because when we run this action we don't *have* a local
- # checkout. In other cases you should prefer a local checkout.
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Linux Build
- id: linux-build
- uses: ./.github/actions/linux-build
- with:
- build-environment: ${{ inputs.build-environment }}
- docker-image-name: ${{ inputs.docker-image-name }}
- build-generates-artifacts: ${{ inputs.build-generates-artifacts }}
- build-with-debug: ${{ inputs.build-with-debug }}
- sync-tag: ${{ inputs.sync-tag }}
- cuda-arch-list: ${{ inputs.cuda-arch-list }}
- test-matrix: ${{ inputs.test-matrix }}
- s3-bucket: ${{ inputs.s3-bucket }}
- aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
diff --git a/.github/workflows/_linux-build-rg.yml b/.github/workflows/_linux-build-rg.yml
deleted file mode 100644
index 6c6a4827e1672..0000000000000
--- a/.github/workflows/_linux-build-rg.yml
+++ /dev/null
@@ -1,105 +0,0 @@
-name: linux-build-rg
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- docker-image-name:
- required: true
- type: string
- description: Name of the base docker image to build with.
- build-generates-artifacts:
- required: false
- type: boolean
- default: true
- description: If set, upload generated build artifacts.
- build-with-debug:
- required: false
- type: boolean
- default: false
- description: If set, build in debug mode.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- cuda-arch-list:
- required: false
- type: string
- default: "5.2"
- description: |
- List of CUDA architectures CI build should target.
- runner-group:
- required: false
- type: string
- default: "arc-lf-linux.2xlarge"
- description: Runner group to select worker type
- test-matrix:
- required: false
- type: string
- description: |
- An optional JSON description of what test configs to run later on. This
- is moved here from the Linux test workflow so that we can apply filter
- logic using test-config labels earlier and skip unnecessary builds
- s3-bucket:
- description: S3 bucket to download artifact
- required: false
- type: string
- default: "gha-artifacts"
- aws-role-to-assume:
- description: role to assume for downloading artifacts
- required: false
- type: string
- default: ""
- secrets:
- HUGGING_FACE_HUB_TOKEN:
- required: false
- description: |
- HF Auth token to avoid rate limits when downloading models or datasets from hub
-
- outputs:
- docker-image:
- value: ${{ jobs.build.outputs.docker-image }}
- description: The docker image containing the built PyTorch.
- test-matrix:
- value: ${{ jobs.build.outputs.test-matrix }}
- description: An optional JSON description of what test configs to run later on.
-
-jobs:
- build:
- # Don't run on forked repos
- if: github.repository_owner == 'pytorch'
- runs-on:
- group: ${{ inputs.runner-group }}
- timeout-minutes: 240
- outputs:
- docker-image: ${{ steps.linux-build.outputs.docker-image }}
- test-matrix: ${{ steps.linux-build.outputs.test-matrix }}
- steps:
- # [pytorch repo ref]
- # Use a pytorch/pytorch reference instead of a reference to the local
- # checkout because when we run this action we don't *have* a local
- # checkout. In other cases you should prefer a local checkout.
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Linux Build
- id: linux-build
- uses: ./.github/actions/linux-build
- with:
- build-environment: ${{ inputs.build-environment }}
- docker-image-name: ${{ inputs.docker-image-name }}
- build-generates-artifacts: ${{ inputs.build-generates-artifacts }}
- build-with-debug: ${{ inputs.build-with-debug }}
- sync-tag: ${{ inputs.sync-tag }}
- cuda-arch-list: ${{ inputs.cuda-arch-list }}
- test-matrix: ${{ inputs.test-matrix }}
- s3-bucket: ${{ inputs.s3-bucket }}
- aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
diff --git a/.github/workflows/_linux-build.yml b/.github/workflows/_linux-build.yml
deleted file mode 100644
index c3bcb0d888dfc..0000000000000
--- a/.github/workflows/_linux-build.yml
+++ /dev/null
@@ -1,242 +0,0 @@
-name: linux-build
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- docker-image-name:
- required: true
- type: string
- description: Name of the base docker image to build with.
- build-generates-artifacts:
- required: false
- type: boolean
- default: true
- description: If set, upload generated build artifacts.
- build-with-debug:
- required: false
- type: boolean
- default: false
- description: If set, build in debug mode.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- cuda-arch-list:
- required: false
- type: string
- default: "5.2"
- description: |
- List of CUDA architectures CI build should target.
- runner:
- required: false
- type: string
- default: "linux.2xlarge"
- description: Runner label to select worker type
- test-matrix:
- required: false
- type: string
- description: |
- An optional JSON description of what test configs to run later on. This
- is moved here from the Linux test workflow so that we can apply filter
- logic using test-config labels earlier and skip unnecessary builds
- selected-test-configs:
- description: |
- A comma-separated list of test configurations from the test matrix to keep.
- An empty list means we keep every configuration by default
- required: false
- type: string
- default: ""
- s3-bucket:
- description: S3 bucket to download artifact
- required: false
- type: string
- default: "gha-artifacts"
- aws-role-to-assume:
- description: Role to assume for downloading artifacts
- required: false
- type: string
- default: ""
- secrets:
- HUGGING_FACE_HUB_TOKEN:
- required: false
- description: |
- HF Auth token to avoid rate limits when downloading models or datasets from hub
-
-
- outputs:
- docker-image:
- value: ${{ jobs.build.outputs.docker-image }}
- description: The docker image containing the built PyTorch.
- test-matrix:
- value: ${{ jobs.build.outputs.test-matrix }}
- description: An optional JSON description of what test configs to run later on.
-
-jobs:
- build:
- # Don't run on forked repos
- if: github.repository_owner == 'pytorch'
- runs-on: ${{ inputs.runner }}
- timeout-minutes: 240
- outputs:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
-
- # [pytorch repo ref]
- # Use a pytorch/pytorch reference instead of a reference to the local
- # checkout because when we run this action we don't *have* a local
- # checkout. In other cases you should prefer a local checkout.
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: configure aws credentials
- uses: aws-actions/configure-aws-credentials@v3
- if: ${{ inputs.aws-role-to-assume != '' }}
- with:
- role-to-assume: ${{ inputs.aws-role-to-assume }}
- role-session-name: gha-linux-build
- aws-region: us-east-1
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image-name }}
-
- - name: Use the following to pull a public copy of the image
- id: print-ghcr-mirror
- env:
- ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- shell: bash
- run: |
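- # strip the registry path from the ECR image and swap ':' for '-' to form the ghcr.io mirror tag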
- tag=${ECR_DOCKER_IMAGE##*/}
- echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Parse ref
- id: parse-ref
- run: .github/scripts/parse_ref.py
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- # Apply the filter logic to the build step too if the test-config label is already there
- - name: Select all requested test configurations (if the test matrix is available)
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- selected-test-configs: ${{ inputs.selected-test-configs }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Download pytest cache
- uses: ./.github/actions/pytest-cache-download
- continue-on-error: true
- with:
- cache_dir: .pytest_cache
- job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
- s3_bucket: ${{ inputs.s3-bucket }}
-
- - name: Build
- if: steps.filter.outputs.is-test-matrix-empty == 'False' || inputs.test-matrix == ''
- id: build
- env:
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- BRANCH: ${{ steps.parse-ref.outputs.branch }}
- # TODO duplicated
- AWS_DEFAULT_REGION: us-east-1
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
- SCCACHE_S3_KEY_PREFIX: ${{ github.workflow }}
- XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
- PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }}
- TORCH_CUDA_ARCH_LIST: ${{ inputs.cuda-arch-list }}
- DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- XLA_CUDA: ${{ contains(inputs.build-environment, 'xla') && '0' || '' }}
- DEBUG: ${{ inputs.build-with-debug && '1' || '0' }}
- OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
- run: |
- # detached container should get cleaned up by teardown_ec2_linux
- container_name=$(docker run \
- -e BUILD_ENVIRONMENT \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e AWS_DEFAULT_REGION \
- -e PR_NUMBER \
- -e SHA1 \
- -e BRANCH \
- -e SCCACHE_BUCKET \
- -e SCCACHE_S3_KEY_PREFIX \
- -e XLA_CUDA \
- -e XLA_CLANG_CACHE_S3_BUCKET_NAME \
- -e SKIP_SCCACHE_INITIALIZATION=1 \
- -e TORCH_CUDA_ARCH_LIST \
- -e PR_LABELS \
- -e OUR_GITHUB_JOB_ID \
- -e HUGGING_FACE_HUB_TOKEN \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --tty \
- --detach \
- --user jenkins \
- -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
- -w /var/lib/jenkins/workspace \
- "${DOCKER_IMAGE}"
- )
- docker exec -t "${container_name}" sh -c '.ci/pytorch/build.sh'
-
- - name: Archive artifacts into zip
- if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped'
- run: |
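- # -1 selects the fastest (lowest) zip compression level to keep archiving quick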
- zip -1 -r artifacts.zip dist/ build/custom_test_artifacts build/lib build/bin .additional_ci_files
-
- - name: Store PyTorch Build Artifacts on S3
- uses: seemethere/upload-artifact-s3@v5
- if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped'
- with:
- name: ${{ inputs.build-environment }}
- retention-days: 14
- if-no-files-found: error
- path: artifacts.zip
- s3-bucket: ${{ inputs.s3-bucket }}
-
- - name: Upload sccache stats
- if: steps.build.outcome != 'skipped'
- uses: seemethere/upload-artifact-s3@v5
- with:
- s3-prefix: |
- ${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
- retention-days: 365
- if-no-files-found: warn
- path: sccache-stats-*.json
- s3-bucket: ${{ inputs.s3-bucket }}
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
diff --git a/.github/workflows/_linux-test-label.yml b/.github/workflows/_linux-test-label.yml
deleted file mode 100644
index 7056c0168a19e..0000000000000
--- a/.github/workflows/_linux-test-label.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-name: linux-test-label
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- test-matrix:
- required: true
- type: string
- description: JSON description of what test configs to run.
- docker-image:
- required: true
- type: string
- description: Docker image to run in.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- timeout-minutes:
- required: false
- type: number
- default: 240
- description: |
- Set the maximum time (in minutes) the workflow should take to finish
- use-gha:
- required: false
- type: string
- default: ""
- description: If set to any value, upload to GHA. Otherwise upload to S3.
- dashboard-tag:
- required: false
- type: string
- default: ""
- s3-bucket:
- description: S3 bucket to download artifact
- required: false
- type: string
- default: "gha-artifacts"
- aws-role-to-assume:
- description: role to assume for downloading artifacts
- required: false
- type: string
- default: ""
- secrets:
- HUGGING_FACE_HUB_TOKEN:
- required: false
- description: |
- HF Auth token to avoid rate limits when downloading models or datasets from hub
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- test:
- # Don't run on forked repos or empty test matrix
- if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
- strategy:
- matrix: ${{ fromJSON(inputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Linux Test
- id: linux-test
- uses: ./.github/actions/linux-test
- with:
- build-environment: ${{ inputs.build-environment }}
- test-matrix: ${{ inputs.test-matrix }}
- docker-image: ${{ inputs.docker-image }}
- sync-tag: ${{ inputs.sync-tag }}
- use-gha: ${{ inputs.use-gha }}
- dashboard-tag: ${{ inputs.dashboard-tag }}
- s3-bucket: ${{ inputs.s3-bucket }}
- aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/_linux-test-rg.yml b/.github/workflows/_linux-test-rg.yml
deleted file mode 100644
index 6dc2f6c63bf3e..0000000000000
--- a/.github/workflows/_linux-test-rg.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-name: linux-test-rg
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- test-matrix:
- required: true
- type: string
- description: JSON description of what test configs to run.
- docker-image:
- required: true
- type: string
- description: Docker image to run in.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- timeout-minutes:
- required: false
- type: number
- default: 240
- description: |
- Set the maximum time (in minutes) the workflow should take to finish
- use-gha:
- required: false
- type: string
- default: ""
- description: If set to any value, upload to GHA. Otherwise upload to S3.
- dashboard-tag:
- required: false
- type: string
- default: ""
- s3-bucket:
- description: S3 bucket to download artifact
- required: false
- type: string
- default: "gha-artifacts"
- aws-role-to-assume:
- description: role to assume for downloading artifacts
- required: false
- type: string
- default: ""
- secrets:
- HUGGING_FACE_HUB_TOKEN:
- required: false
- description: |
- HF Auth token to avoid rate limits when downloading models or datasets from hub
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- test:
- # Don't run on forked repos or empty test matrix
- if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
- strategy:
- matrix: ${{ fromJSON(inputs.test-matrix) }}
- fail-fast: false
- runs-on:
- group: ${{ matrix.runner }}
- timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Linux Test
- id: linux-test
- uses: ./.github/actions/linux-test
- with:
- build-environment: ${{ inputs.build-environment }}
- test-matrix: ${{ inputs.test-matrix }}
- docker-image: ${{ inputs.docker-image }}
- sync-tag: ${{ inputs.sync-tag }}
- use-gha: ${{ inputs.use-gha }}
- dashboard-tag: ${{ inputs.dashboard-tag }}
- s3-bucket: ${{ inputs.s3-bucket }}
- aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/_linux-test.yml b/.github/workflows/_linux-test.yml
deleted file mode 100644
index 5f3f290dd31da..0000000000000
--- a/.github/workflows/_linux-test.yml
+++ /dev/null
@@ -1,414 +0,0 @@
-name: linux-test
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- test-matrix:
- required: true
- type: string
- description: JSON description of what test configs to run.
- docker-image:
- required: true
- type: string
- description: Docker image to run in.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- timeout-minutes:
- required: false
- type: number
- default: 240
- description: |
- Set the maximum time (in minutes) the workflow should take to finish
- use-gha:
- required: false
- type: string
- default: ""
- description: If set to any value, upload to GHA. Otherwise upload to S3.
- dashboard-tag:
- required: false
- type: string
- default: ""
- s3-bucket:
- description: S3 bucket to download artifact
- required: false
- type: string
- default: "gha-artifacts"
- aws-role-to-assume:
- description: role to assume for downloading artifacts
- required: false
- type: string
- default: ""
- secrets:
- HUGGING_FACE_HUB_TOKEN:
- required: false
- description: |
- HF Auth token to avoid rate limits when downloading models or datasets from hub
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- test:
- # Don't run on forked repos or empty test matrix
- if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
- strategy:
- matrix: ${{ fromJSON(inputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- if: ${{ !contains(matrix.runner, 'gcp.a100') }}
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- instructions: |
- All testing is done inside the container. To start an interactive session run:
- docker exec -it $(docker container ps --format '{{.ID}}') bash
-
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: configure aws credentials
- if: ${{ inputs.aws-role-to-assume != '' }}
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: ${{ inputs.aws-role-to-assume }}
- role-session-name: gha-linux-test
- aws-region: us-east-1
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image }}
-
- - name: Use the following to pull a public copy of the image
- id: print-ghcr-mirror
- env:
- ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- shell: bash
- run: |
- tag=${ECR_DOCKER_IMAGE##*/}
- echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Check if in a ARC runner
- shell: bash
- id: check_arc_runner
- run: echo "IN_ARC_RUNNER=$([ -f /.inarc ] && echo true || echo false)" >> "$GITHUB_OUTPUT"
-
- - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
- id: install-nvidia-driver
- uses: pytorch/test-infra/.github/actions/setup-nvidia@main
- if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_arc_runner.outputs.IN_ARC_RUNNER == 'false' }}
-
- - name: Lock NVIDIA A100 40GB Frequency
- run: |
- sudo nvidia-smi -pm 1
- sudo nvidia-smi -ac 1215,1410
- nvidia-smi
- if: contains(matrix.runner, 'a100')
-
- - name: Start monitoring script
- id: monitor-script
- shell: bash
- continue-on-error: true
- run: |
- python3 -m pip install psutil==5.9.1 nvidia-ml-py==11.525.84
- python3 -m tools.stats.monitor > usage_log.txt 2>&1 &
- echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
-
- - name: Download build artifacts
- uses: ./.github/actions/download-build-artifacts
- with:
- name: ${{ inputs.build-environment }}
- s3-bucket: ${{ inputs.s3-bucket }}
-
- - name: Download TD artifacts
- continue-on-error: true
- uses: ./.github/actions/download-td-artifacts
-
- - name: Parse ref
- id: parse-ref
- run: .github/scripts/parse_ref.py
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Check for keep-going label and re-enabled test issues
- # This uses the filter-test-configs action because it conveniently
- # checks for labels and re-enabled test issues. It does not actually do
- # any filtering. All filtering is done in the build step.
- id: keep-going
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Set Test step time
- id: test-timeout
- shell: bash
- env:
- JOB_TIMEOUT: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- run: |
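- # give the Test step a timeout 30 minutes shorter than the overall job timeout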
- echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"
-
- - name: Test
- id: test
- timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
- env:
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_WORKFLOW: ${{ github.workflow }}
- GITHUB_JOB: ${{ github.job }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- GITHUB_RUN_NUMBER: ${{ github.run_number }}
- GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
- JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
- BRANCH: ${{ steps.parse-ref.outputs.branch }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- BASE_SHA: ${{ github.event.pull_request.base.sha || github.sha }}
- TEST_CONFIG: ${{ matrix.config }}
- SHARD_NUMBER: ${{ matrix.shard }}
- NUM_TEST_SHARDS: ${{ matrix.num_shards }}
- REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
- CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
- VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
- NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
- NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
- TD_DISTRIBUTED: ${{ steps.keep-going.outputs.ci-td-distributed }}
- SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
- SCCACHE_S3_KEY_PREFIX: ${{ github.workflow }}
- SHM_SIZE: ${{ contains(inputs.build-environment, 'cuda') && '2g' || '1g' }}
- DOCKER_IMAGE: ${{ inputs.docker-image }}
- XLA_CUDA: ${{ contains(inputs.build-environment, 'xla') && '0' || '' }}
- XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
- PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
- PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
- DASHBOARD_TAG: ${{ inputs.dashboard-tag }}
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- run: |
- set -x
-
- if [[ $TEST_CONFIG == 'multigpu' ]]; then
- TEST_COMMAND=.ci/pytorch/multigpu-test.sh
- elif [[ $BUILD_ENVIRONMENT == *onnx* ]]; then
- TEST_COMMAND=.ci/onnx/test.sh
- else
- TEST_COMMAND=.ci/pytorch/test.sh
- fi
-
- # detached container should get cleaned up by teardown_ec2_linux
- # TODO: Stop building test binaries as part of the build phase
- # Used for GPU_FLAG since that doesn't play nice
- # shellcheck disable=SC2086,SC2090
- container_name=$(docker run \
- ${GPU_FLAG:-} \
- -e BUILD_ENVIRONMENT \
- -e PR_NUMBER \
- -e GITHUB_ACTIONS \
- -e GITHUB_REPOSITORY \
- -e GITHUB_WORKFLOW \
- -e GITHUB_JOB \
- -e GITHUB_RUN_ID \
- -e GITHUB_RUN_NUMBER \
- -e GITHUB_RUN_ATTEMPT \
- -e JOB_ID \
- -e JOB_NAME \
- -e BASE_SHA \
- -e BRANCH \
- -e SHA1 \
- -e AWS_DEFAULT_REGION \
- -e IN_WHEEL_TEST \
- -e SHARD_NUMBER \
- -e TEST_CONFIG \
- -e NUM_TEST_SHARDS \
- -e REENABLED_ISSUES \
- -e CONTINUE_THROUGH_ERROR \
- -e VERBOSE_TEST_LOGS \
- -e NO_TEST_TIMEOUT \
- -e NO_TD \
- -e TD_DISTRIBUTED \
- -e PR_LABELS \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e SCCACHE_BUCKET \
- -e SCCACHE_S3_KEY_PREFIX \
- -e XLA_CUDA \
- -e XLA_CLANG_CACHE_S3_BUCKET_NAME \
- -e PYTORCH_TEST_CUDA_MEM_LEAK_CHECK \
- -e PYTORCH_TEST_RERUN_DISABLED_TESTS \
- -e SKIP_SCCACHE_INITIALIZATION=1 \
- -e HUGGING_FACE_HUB_TOKEN \
- -e DASHBOARD_TAG \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --ipc=host \
- --shm-size="${SHM_SIZE}" \
- --tty \
- --detach \
- --name="${container_name}" \
- --user jenkins \
- -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
- -w /var/lib/jenkins/workspace \
- "${DOCKER_IMAGE}"
- )
- # Propagate download.pytorch.org IP to container
- grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" sudo bash -c "/bin/cat >> /etc/hosts"
- echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}"
- docker exec -t "${container_name}" sh -c "pip install $(echo dist/*.whl)[opt-einsum] && ${TEST_COMMAND}"
-
- - name: Upload pytest cache if tests failed
- uses: ./.github/actions/pytest-cache-upload
- continue-on-error: true
- if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure'
- with:
- cache_dir: .pytest_cache
- shard: ${{ matrix.shard }}
- sha: ${{ github.event.pull_request.head.sha || github.sha }}
- test_config: ${{ matrix.config }}
- job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
-
- - name: Print remaining test logs
- shell: bash
- if: always() && steps.test.conclusion
- run: |
- cat test/**/*_toprint.log || true
-
- - name: Stop monitoring script
- if: always() && steps.monitor-script.outputs.monitor-script-pid
- shell: bash
- continue-on-error: true
- env:
- MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
- run: |
- kill "$MONITOR_SCRIPT_PID"
-
- - name: Upload test artifacts
- uses: ./.github/actions/upload-test-artifacts
- if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
- with:
- file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
- use-gha: ${{ inputs.use-gha }}
- s3-bucket: ${{ inputs.s3-bucket }}
-
- - name: Collect backtraces from coredumps (if any)
- if: always()
- run: |
- # shellcheck disable=SC2156
- find . -iname "core.[1-9]*" -exec docker exec "${DOCKER_CONTAINER_ID}" sh -c "gdb python {} -ex 'bt' -ex 'q'" \;
-
- - name: Store Core dumps on S3
- uses: seemethere/upload-artifact-s3@v5
- if: failure()
- with:
- name: coredumps-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}
- retention-days: 14
- if-no-files-found: ignore
- path: ./**/core.[1-9]*
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
-
- # NB: We are currently having an intermittent GPU-related issue on G5 runners with
- # A10G GPU. Once this happens, trying to reset the GPU as done in setup-nvidia does
- # not seem to help. Here are some symptoms:
- # * Calling nvidia-smi times out after 60 seconds
- # * nvidia-smi fails with an "unable to determine the device handle for GPU:
- # unknown error" message
- # * Tests fail with a missing CUDA GPU error when initializing CUDA in PyTorch
- # * Running docker with --gpus all fails with an error response from the daemon
- #
- # As both the root cause and recovery path are unclear, let's take the runner out of
- # service so that it doesn't get any more jobs
- - name: Check NVIDIA driver installation step
- if: failure() && steps.install-nvidia-driver.outcome && steps.install-nvidia-driver.outcome != 'skipped'
- shell: bash
- env:
- RUNNER_WORKSPACE: ${{ runner.workspace }}
- run: |
- set +e
- set -x
-
- nvidia-smi
- # NB: Surprisingly, the nvidia-smi command returns successfully with return code 0 even
- # when the driver has already crashed, as it can still get the driver version
- # and some basic information like the bus ID. However, the rest of the information
- # would be missing (ERR!), for example:
- #
- # +-----------------------------------------------------------------------------+
- # | NVIDIA-SMI 525.89.02 Driver Version: 525.89.02 CUDA Version: 12.0 |
- # |-------------------------------+----------------------+----------------------+
- # | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
- # | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
- # | | | MIG M. |
- # |===============================+======================+======================|
- # | 0 ERR! Off | 00000000:00:1E.0 Off | ERR! |
- # |ERR! ERR! ERR! ERR! / ERR! | 4184MiB / 23028MiB | ERR! Default |
- # | | | ERR! |
- # +-------------------------------+----------------------+----------------------+
- #
- # +-----------------------------------------------------------------------------+
- # | Processes: |
- # | GPU GI CI PID Type Process name GPU Memory |
- # | ID ID Usage |
- # |=============================================================================|
- # +-----------------------------------------------------------------------------+
- #
- # This should be reported as a failure instead, as it is guaranteed to fail when
- # Docker tries to run with --gpus all
- #
- # So, the correct check here is to query one of the missing pieces of info, like the
- # GPU name, so that the command fails accordingly
- nvidia-smi --query-gpu=gpu_name --format=csv,noheader --id=0
- NVIDIA_SMI_STATUS=$?
-
- # These are acceptable return codes from nvidia-smi, as copied from the setup-nvidia GitHub action
- if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then
- echo "NVIDIA driver installation has failed, shutting down the runner..."
- .github/scripts/stop_runner_service.sh
- fi
-
- # For runners with multiple GPUs, we also want to confirm that the number of GPUs is a
- # power of 2, i.e. 1, 2, 4, or 8. This is to avoid flaky test issues when one GPU fails
- # https://github.com/pytorch/test-infra/issues/4000
- GPU_COUNT=$(nvidia-smi --list-gpus | wc -l)
- NVIDIA_SMI_STATUS=$?
-
- # These are acceptable return codes from nvidia-smi, as copied from the setup-nvidia GitHub action
- if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then
- echo "NVIDIA driver installation has failed, shutting down the runner..."
- .github/scripts/stop_runner_service.sh
- fi
-
- # Check that the GPU count is a power of 2
- if [ "$GPU_COUNT" -le 8 ] && [ "$GPU_COUNT" -ne 1 ] && [ "$GPU_COUNT" -ne 2 ] && [ "$GPU_COUNT" -ne 4 ] && [ "$GPU_COUNT" -ne 8 ]; then
- echo "NVIDIA driver detects $GPU_COUNT GPUs. The runner has a broken GPU, shutting it down..."
- .github/scripts/stop_runner_service.sh
- fi
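
For reference, the power-of-2 check in the deleted step above can also be expressed with a bitwise test. This is only an illustrative sketch, not part of the original workflow, and it drops the original cap at 8 GPUs:

```bash
# Hypothetical standalone sketch of the GPU-count health check above.
# A positive integer n is a power of two exactly when n & (n - 1) == 0.
GPU_COUNT=$(nvidia-smi --list-gpus | wc -l)
if [ "$GPU_COUNT" -eq 0 ] || (( GPU_COUNT & (GPU_COUNT - 1) )); then
  echo "NVIDIA driver detects $GPU_COUNT GPUs. The runner has a broken GPU, shutting it down..."
  .github/scripts/stop_runner_service.sh
fi
```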
diff --git a/.github/workflows/_mac-build.yml b/.github/workflows/_mac-build.yml
deleted file mode 100644
index a27ddaf629b51..0000000000000
--- a/.github/workflows/_mac-build.yml
+++ /dev/null
@@ -1,210 +0,0 @@
-name: mac-build
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- runner-type:
- required: true
- type: string
- description: Name of the GitHub-managed runner type to use for the build.
- build-generates-artifacts:
- required: true
- type: boolean
- description: If set, upload generated build artifacts.
- xcode-version:
- required: false
- type: string
- default: ""
- description: What Xcode version to build with.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- python-version:
- required: false
- type: string
- default: "3.8"
- description: |
- The Python version to be used. Defaults to 3.8.
- environment-file:
- required: false
- type: string
- description: Set the conda environment file used to setup macOS build.
- test-matrix:
- required: false
- type: string
- description: |
- An optional JSON description of what test configs to run later on. This
- is moved here from the Linux test workflow so that we can apply filter
- logic using test-config labels earlier and skip unnecessary builds
- sccache-use-gha:
- required: false
- type: boolean
- default: false
- description: If true, use the Github cache as the storage option for sccache instead of S3.
-
- outputs:
- test-matrix:
- value: ${{ jobs.build.outputs.test-matrix }}
- description: An optional JSON description of what test configs to run later on.
- build-outcome:
- value: ${{ jobs.build.outputs.build-outcome }}
- description: The outcome of the build step. This is used to influence test filtering logic later on.
-
-jobs:
- build:
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch'
- runs-on: ${{ inputs.runner-type }}
- env:
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- SCCACHE_USE_GHA: ${{ inputs.sccache-use-gha }} # this is placed here instead of the sccache step to appease actionlint
- outputs:
- build-outcome: ${{ steps.build.outcome }}
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- steps:
- - name: Clean up disk space before running MacOS workflow
- uses: pytorch/test-infra/.github/actions/check-disk-space@main
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Set xcode version
- env:
- XCODE_VERSION: ${{ inputs.xcode-version }}
- run: |
- if [ -n "${XCODE_VERSION}" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_${XCODE_VERSION}.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
-
- - name: Setup miniconda
- if: inputs.environment-file == ''
- uses: pytorch/test-infra/.github/actions/setup-miniconda@main
- with:
- python-version: ${{ inputs.python-version }}
- environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
- pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt
-
- # This option is used when cross-compiling arm64 from x86-64. Specifically, we need an arm64 conda
- # environment even though the arch is x86-64
- - name: Setup miniconda using the provided environment file
- if: inputs.environment-file != ''
- uses: pytorch/test-infra/.github/actions/setup-miniconda@main
- with:
- python-version: ${{ inputs.python-version }}
- environment-file: ${{ inputs.environment-file }}
- pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt
-
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- set -ex
-
- DOWNLOAD_SCCACHE=0
- SCCACHE_VERSION="0.4.1"
- LOCAL_PATH="/usr/local/bin"
-
- if [ ! -f "${LOCAL_PATH}/sccache" ]; then
- DOWNLOAD_SCCACHE=1
- else
- LOCAL_VERSION=$("${LOCAL_PATH}/sccache" --version | cut -d" " -f2)
-
- if [ "${LOCAL_VERSION}" != "${SCCACHE_VERSION}" ]; then
- DOWNLOAD_SCCACHE=1
- fi
- fi
-
- if [ "${DOWNLOAD_SCCACHE}" == "1" ]; then
- sudo curl --retry 3 --retry-all-errors "https://s3.amazonaws.com/ossci-macos/sccache/sccache-v0.4.1-${RUNNER_ARCH}" --output "${LOCAL_PATH}/sccache"
- sudo chmod +x "${LOCAL_PATH}/sccache"
- fi
-
- if [[ "${SCCACHE_USE_GHA}" == "true" ]]; then
- echo "ACTIONS_CACHE_URL=${ACTIONS_CACHE_URL}" >> "${GITHUB_ENV}"
- echo "ACTIONS_RUNTIME_TOKEN=${ACTIONS_RUNTIME_TOKEN}" >> "${GITHUB_ENV}"
- echo "SCCACHE_GHA_ENABLED=on" >> "${GITHUB_ENV}"
- else
- # The runner has access to the S3 bucket via an IAM profile without the need
- # for any credentials
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- echo "SCCACHE_S3_KEY_PREFIX=${GITHUB_WORKFLOW}" >> "${GITHUB_ENV}"
- fi
-
- # This is needed so that the later build script can find sccache (which sccache)
- echo "${LOCAL_PATH}" >> $GITHUB_PATH
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- # Apply the filter logic to the build step too if the test-config label is already there
- - name: Select all requested test configurations (if the test matrix is available)
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Build
- if: steps.filter.outputs.is-test-matrix-empty == 'False' || inputs.test-matrix == ''
- id: build
- env:
- OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- run: |
- echo "CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname "$(which conda)")/../"}" >> "${GITHUB_ENV}"
-
- if [[ -n "$CONDA_ENV" ]]; then
- # Use binaries under conda environment
- export PATH="$CONDA_ENV/bin":$PATH
- fi
-
- # NB: Same trick as on Linux, there is no need to initialize sccache and risk having
- # it hang or time out at initialization. The cache will be started automatically
- export SKIP_SCCACHE_INITIALIZATION=1
- ${CONDA_RUN} .ci/pytorch/macos-build.sh
-
- - name: Archive artifacts into zip
- if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped'
- run: |
- zip -1 -r artifacts.zip dist/ build/.ninja_log build/compile_commands.json .additional_ci_files
-
- - name: Store PyTorch Build Artifacts on GHA
- uses: actions/upload-artifact@v3
- if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped'
- with:
- name: ${{ env.BUILD_ENVIRONMENT }}
- retention-days: 14
- if-no-files-found: error
- path: artifacts.zip
-
- - name: Upload sccache stats to GHA
- uses: actions/upload-artifact@v3
- # Only if sccache is installed, see above
- if: ${{ (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && steps.build.outcome != 'skipped' }}
- with:
- name: sccache-stats-${{ inputs.build-environment }}-runattempt${{ github.run_attempt }}-${{ steps.get-job-id.outputs.job-id }}
- retention-days: 14
- if-no-files-found: warn
- path: sccache-stats-*.json
-
- - name: Clean up disk space
- if: always()
- continue-on-error: true
- uses: pytorch/test-infra/.github/actions/check-disk-space@main
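
A minimal sketch of how a caller might invoke the reusable `_mac-build.yml` workflow deleted above. Only the input names come from its `workflow_call` definition; the job name and values are placeholders:

```yaml
jobs:
  macos-build:                                     # hypothetical job name
    uses: ./.github/workflows/_mac-build.yml
    with:
      build-environment: macos-py3-placeholder     # placeholder label
      runner-type: macos-runner-placeholder        # placeholder runner type
      build-generates-artifacts: true
      python-version: "3.8"
    secrets: inherit                               # pass caller secrets through (optional)
```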
diff --git a/.github/workflows/_mac-test-mps.yml b/.github/workflows/_mac-test-mps.yml
deleted file mode 100644
index 2c0da2f8afd7c..0000000000000
--- a/.github/workflows/_mac-test-mps.yml
+++ /dev/null
@@ -1,162 +0,0 @@
-name: mac-test-arm64
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- python-version:
- required: false
- type: string
- default: "3.8"
- description: |
- The Python version to be used. Defaults to 3.8.
- test-matrix:
- required: true
- type: string
- description: |
- A JSON description of what configs to run later on.
-
-jobs:
- filter:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
- keep-going: ${{ steps.filter.outputs.keep-going }}
- ci-verbose-test-logs: ${{ steps.filter.outputs.ci-verbose-test-logs }}
- ci-no-test-timeout: ${{ steps.filter.outputs.ci-no-test-timeout }}
- ci-no-td: ${{ steps.filter.outputs.ci-no-td }}
- reenabled-issues: ${{ steps.filter.outputs.reenabled-issues }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
-
- - name: Select all requested test configurations
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
-
- test:
- needs: filter
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch' && needs.filter.outputs.is-test-matrix-empty == 'False'
- strategy:
- matrix: ${{ fromJSON(needs.filter.outputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- steps:
- - name: Print runner OS/HW info
- run: |
- sysctl machdep.cpu.brand_string kern.osproductversion
-
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- quiet-checkout: true
-
- - name: Clean checkout
- run: |
- git clean -fxd
-
- - name: Download build artifacts
- uses: ./.github/actions/download-build-artifacts
- with:
- name: ${{ inputs.build-environment }}
- use-gha: true
-
- - name: Setup miniconda
- uses: pytorch/test-infra/.github/actions/setup-miniconda@main
- with:
- python-version: ${{ inputs.python-version }}
- environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
- pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt
-
- - name: Install PyTorch and run MPS tests
- id: test
- env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- TEST_CONFIG: ${{ matrix.config }}
- ENV_NAME: conda-test-env-${{ github.run_id }}
- PY_VERS: 3.9
- PR_BODY: ${{ github.event.pull_request.body }}
- CONTINUE_THROUGH_ERROR: ${{ needs.filter.outputs.keep-going }}
- VERBOSE_TEST_LOGS: ${{ needs.filter.outputs.ci-verbose-test-logs }}
- NO_TEST_TIMEOUT: ${{ needs.filter.outputs.ci-no-test-timeout }}
- NO_TD: ${{ needs.filter.outputs.ci-no-td }}
- PIP_REQUIREMENTS_FILE: .github/requirements/pip-requirements-${{ runner.os }}.txt
- REENABLED_ISSUES: ${{ needs.filter.outputs.reenabled-issues }}
- run: |
- # shellcheck disable=SC1090
- set -ex
-
- if [[ -n "$CONDA_ENV" ]]; then
- # Use binaries under conda environment
- export PATH="$CONDA_ENV/bin":$PATH
- fi
-
- # Print out some information about the test environment
- which conda
- conda --version
- ${CONDA_RUN} which python3
- ${CONDA_RUN} python3 --version
- ${CONDA_RUN} which python
- ${CONDA_RUN} python --version
-
- ${CONDA_RUN} python3 -mpip install --no-index --no-deps dist/*.whl
-
- set +e
- pushd "${RUNNER_TEMP}"
- # Install pip dependencies if they are not found. This is to mitigate a peculiar,
- # flaky missing-dependencies issue on MacOS
- ${CONDA_RUN} python3 -c "import torch"
- RC=$?
- popd
-
- if [ "${RC}" -ne 0 ]; then
- ${CONDA_RUN} python3 -mpip install --ignore-installed -r "${PIP_REQUIREMENTS_FILE}"
- fi
- set -e
-
- ${CONDA_RUN} python3 test/run_test.py --mps --verbose
-
- - name: Print remaining test logs
- shell: bash
- if: always() && steps.test.conclusion
- run: |
- cat test/**/*_toprint.log || true
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Upload test artifacts
- uses: ./.github/actions/upload-test-artifacts
- if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
- with:
- use-gha: true
- file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
-
- - name: Clean up disk space
- if: always()
- continue-on-error: true
- uses: pytorch/test-infra/.github/actions/check-disk-space@main
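
The MPS test step above follows an install-then-verify pattern: install the built wheel without dependencies, then pull in the pinned requirements only if importing torch fails. A minimal standalone sketch of that pattern (paths are the same variables the step uses):

```bash
# Sketch of the install-then-verify pattern used above.
python3 -mpip install --no-index --no-deps dist/*.whl

# Run the import from a temp dir so the local torch/ source tree is not picked up.
if ! (cd "${RUNNER_TEMP}" && python3 -c "import torch"); then
  # Fall back to reinstalling the pinned requirements to repair missing dependencies.
  python3 -mpip install --ignore-installed -r "${PIP_REQUIREMENTS_FILE}"
fi
```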
diff --git a/.github/workflows/_mac-test.yml b/.github/workflows/_mac-test.yml
deleted file mode 100644
index 3e82194ff3461..0000000000000
--- a/.github/workflows/_mac-test.yml
+++ /dev/null
@@ -1,219 +0,0 @@
-name: mac-test
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- test-matrix:
- required: true
- type: string
- description: JSON description of what test configs to run.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- python-version:
- required: false
- type: string
- default: "3.8"
- description: |
- The Python version to be used. Defaults to 3.8.
- timeout-minutes:
- required: false
- type: number
- default: 270
- description: |
- Set the maximum time (in minutes) the workflow should take to finish
-
-jobs:
- test:
- # Don't run on forked repos or empty test matrix
- if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
- # For setup-miniconda, see https://github.com/conda-incubator/setup-miniconda/issues/179
- # Also ensure that we always run with the right architecture
- defaults:
- run:
- shell: bash -e -l {0}
- strategy:
- matrix: ${{ fromJSON(inputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- TEST_CONFIG: ${{ matrix.config }}
- SHARD_NUMBER: ${{ matrix.shard }}
- NUM_TEST_SHARDS: ${{ matrix.num_shards }}
- PR_BODY: ${{ github.event.pull_request.body }}
- steps:
- - name: Print runner OS/HW info
- run: |
- sysctl machdep.cpu.brand_string kern.osproductversion
-
- - name: Clean up leftover processes on MacOS pet runner
- continue-on-error: true
- run: |
- for PROCESS in "python" "conda" "ninja" "clang"; do
- echo "Cleaning up all remaining ${PROCESS} process"
- pkill "${PROCESS}" || true
- done
-
- - name: Clean up leftover local python3 site-packages on MacOS pet runner
- continue-on-error: true
- run: |
- for dir in ~/.local/lib/python3.*/site-packages; do
- echo "Cleaning up ${dir}"
- rm -rf "${dir}"
- done
-
- - name: Clean up disk space before running MacOS workflow
- uses: pytorch/test-infra/.github/actions/check-disk-space@main
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Download build artifacts
- uses: ./.github/actions/download-build-artifacts
- with:
- name: ${{ inputs.build-environment }}
- use-gha: true
-
- - name: Download TD artifacts
- continue-on-error: true
- uses: ./.github/actions/download-td-artifacts
- with:
- use-gha: true
-
- - name: Setup miniconda
- uses: pytorch/test-infra/.github/actions/setup-miniconda@main
- with:
- python-version: ${{ inputs.python-version }}
- environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}
- pip-requirements-file: .github/requirements/pip-requirements-${{ runner.os }}.txt
-
- - name: Start monitoring script
- id: monitor-script
- continue-on-error: true
- run: |
- ${CONDA_RUN} python3 -m tools.stats.monitor > usage_log.txt 2>&1 &
- echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
-
- - name: Parse ref
- id: parse-ref
- run: .github/scripts/parse_ref.py
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Check for keep-going label and re-enabled test issues
- # This uses the filter-test-configs action because it conveniently
- # checks for labels and re-enabled test issues. It does not actually do
- # any filtering. All filtering is done in the build step.
- id: keep-going
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Set Test step time
- id: test-timeout
- shell: bash
- env:
- JOB_TIMEOUT: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- run: |
- echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"
-
- - name: Test
- id: test
- timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
- env:
- PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
- PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
- CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
- VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
- NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
- NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
- PIP_REQUIREMENTS_FILE: .github/requirements/pip-requirements-${{ runner.os }}.txt
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_WORKFLOW: ${{ github.workflow }}
- GITHUB_JOB: ${{ github.job }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- GITHUB_RUN_NUMBER: ${{ github.run_number }}
- GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
- JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
- REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
- run: |
- # shellcheck disable=SC1090
- set -ex
-
- arch
-
- if [[ -n "$CONDA_ENV" ]]; then
- # Use binaries under conda environment
- export PATH="$CONDA_ENV/bin":$PATH
- fi
-
- # Print out some information about the test environment
- which conda
- conda --version
- ${CONDA_RUN} which python3
- ${CONDA_RUN} python3 --version
- ${CONDA_RUN} which python
- ${CONDA_RUN} python --version
-
- ${CONDA_RUN} python3 -mpip install --no-index --no-deps dist/*.whl
-
- set +e
- pushd "${RUNNER_TEMP}"
- # Install pip dependencies if they are not found. This is to mitigate a peculiar,
- # flaky missing-dependencies issue on MacOS
- ${CONDA_RUN} python3 -c "import torch"
- RC=$?
- popd
-
- if [ "${RC}" -ne 0 ]; then
- ${CONDA_RUN} python3 -mpip install --ignore-installed -r "${PIP_REQUIREMENTS_FILE}"
- fi
- set -e
-
- ${CONDA_RUN} .ci/pytorch/macos-test.sh
-
- - name: Print remaining test logs
- shell: bash
- if: always() && steps.test.conclusion
- run: |
- cat test/**/*_toprint.log || true
-
- - name: Stop monitoring script
- if: always() && ${{ steps.monitor-script.outputs.monitor-script-pid }}
- continue-on-error: true
- env:
- MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
- run: |
- kill "$MONITOR_SCRIPT_PID"
-
- - name: Upload test artifacts
- uses: ./.github/actions/upload-test-artifacts
- if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
- with:
- use-gha: true
- file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
-
- - name: Clean up disk space
- if: always()
- continue-on-error: true
- uses: pytorch/test-infra/.github/actions/check-disk-space@main
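
The `matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes` expressions above work around the lack of a ternary operator in GitHub Actions expressions: `cond && a || b` behaves like one as long as `a` is truthy. A small illustration of the same pattern:

```yaml
# Illustrative only: && / || emulate a ternary in GitHub expressions.
timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
# Intended meaning: 600 minutes when mem_leak_check is enabled, otherwise the input value.
# Caveat: if the "then" value were falsy (0, '', false), the "else" branch would win instead.
```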
diff --git a/.github/workflows/_rocm-test.yml b/.github/workflows/_rocm-test.yml
deleted file mode 100644
index 1f2d86273ee14..0000000000000
--- a/.github/workflows/_rocm-test.yml
+++ /dev/null
@@ -1,279 +0,0 @@
-# TODO: this looks sort of similar to _linux-test, but there are like a dozen
-# places where you would have to insert an if statement. Probably it's better to
-# just use a different workflow altogether
-
-name: test
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- test-matrix:
- required: true
- type: string
- description: JSON description of what test configs to run.
- docker-image:
- required: true
- type: string
- description: Docker image to run in.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- timeout-minutes:
- required: false
- type: number
- default: 300
- description: |
- Set the maximum time (in minutes) the workflow should take to finish
- tests-to-include:
- required: false
- type: string
- default: ""
- description: |
- List of tests to include (empty string implies default list)
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-permissions:
- id-token: write
- contents: read
-
-jobs:
- test:
- # Don't run on forked repos or empty test matrix
- if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
- strategy:
- matrix: ${{ fromJSON(inputs.test-matrix) }}
- fail-fast: false
- timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- runs-on: ${{ matrix.runner }}
- steps:
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- no-sudo: true
-
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
-
- - name: configure aws credentials
- id: aws_creds
- uses: aws-actions/configure-aws-credentials@v4
- with:
- role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
- aws-region: us-east-1
- role-duration-seconds: 18000
-
- - name: Login to Amazon ECR
- id: login-ecr
- continue-on-error: true
- uses: aws-actions/amazon-ecr-login@v2
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image }}
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Start monitoring script
- id: monitor-script
- shell: bash
- continue-on-error: true
- run: |
- python3 -m pip install psutil==5.9.1 nvidia-ml-py==11.525.84
- python3 -m tools.stats.monitor > usage_log.txt 2>&1 &
- echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
-
- - name: Download build artifacts
- uses: ./.github/actions/download-build-artifacts
- with:
- name: ${{ inputs.build-environment }}
-
- - name: Download TD artifacts
- continue-on-error: true
- uses: ./.github/actions/download-td-artifacts
-
- - name: Parse ref
- id: parse-ref
- run: .github/scripts/parse_ref.py
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Check for keep-going label and re-enabled test issues
- # This uses the filter-test-configs action because it conveniently
- # checks for labels and re-enabled test issues. It does not actually do
- # any filtering. All filtering is done in the build step.
- id: keep-going
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Set Test step time
- id: test-timeout
- shell: bash
- env:
- JOB_TIMEOUT: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- run: |
- echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"
-
- - name: Test
- id: test
- env:
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_WORKFLOW: ${{ github.workflow }}
- GITHUB_JOB: ${{ github.job }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- GITHUB_RUN_NUMBER: ${{ github.run_number }}
- GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
- JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
- BRANCH: ${{ steps.parse-ref.outputs.branch }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
- VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
- NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
- NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
- TEST_CONFIG: ${{ matrix.config }}
- SHARD_NUMBER: ${{ matrix.shard }}
- NUM_TEST_SHARDS: ${{ matrix.num_shards }}
- REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
- SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
- DOCKER_IMAGE: ${{ inputs.docker-image }}
- XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
- PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
- PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
- TESTS_TO_INCLUDE: ${{ inputs.tests-to-include }}
- timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
- run: |
- set -x
-
- if [[ $TEST_CONFIG == 'multigpu' ]]; then
- TEST_COMMAND=.ci/pytorch/multigpu-test.sh
- elif [[ $BUILD_ENVIRONMENT == *onnx* ]]; then
- TEST_COMMAND=.ci/caffe2/test.sh
- else
- TEST_COMMAND=.ci/pytorch/test.sh
- fi
-
- # detached container should get cleaned up by teardown_ec2_linux
- # TODO: Stop building test binaries as part of the build phase
- # Used for GPU_FLAG since that doesn't play nice
- # shellcheck disable=SC2086,SC2090
- container_name=$(docker run \
- ${GPU_FLAG:-} \
- -e BUILD_ENVIRONMENT \
- -e PR_NUMBER \
- -e GITHUB_ACTIONS \
- -e GITHUB_REPOSITORY \
- -e GITHUB_WORKFLOW \
- -e GITHUB_JOB \
- -e GITHUB_RUN_ID \
- -e GITHUB_RUN_NUMBER \
- -e GITHUB_RUN_ATTEMPT \
- -e JOB_ID \
- -e JOB_NAME \
- -e BRANCH \
- -e SHA1 \
- -e AWS_DEFAULT_REGION \
- -e IN_WHEEL_TEST \
- -e SHARD_NUMBER \
- -e TEST_CONFIG \
- -e NUM_TEST_SHARDS \
- -e REENABLED_ISSUES \
- -e CONTINUE_THROUGH_ERROR \
- -e VERBOSE_TEST_LOGS \
- -e NO_TEST_TIMEOUT \
- -e NO_TD \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e SCCACHE_BUCKET \
- -e XLA_CLANG_CACHE_S3_BUCKET_NAME \
- -e PYTORCH_TEST_CUDA_MEM_LEAK_CHECK \
- -e PYTORCH_TEST_RERUN_DISABLED_TESTS \
- -e TESTS_TO_INCLUDE \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --ulimit stack=10485760:83886080 \
- --ulimit core=0 \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --shm-size="8g" \
- --tty \
- --detach \
- --name="${container_name}" \
- --user jenkins \
- -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
- -w /var/lib/jenkins/workspace \
- "${DOCKER_IMAGE}"
- )
- # save container name for later step
- echo "CONTAINER_NAME=${container_name}" >> "$GITHUB_ENV"
- # The jenkins user has no write permission to the mounted workspace; work around this by copying it within the container to the jenkins home
- docker exec -t "${container_name}" sh -c "cd .. && cp -R workspace pytorch && cd pytorch && pip install dist/*.whl && ${TEST_COMMAND}"
-
- - name: Save test results
- if: always()
- run: |
- # copy test results back to the mounted workspace; sudo is needed, and the resulting permissions are correct
- docker exec -t "${{ env.CONTAINER_NAME }}" sh -c "cd ../pytorch && sudo cp -R test/test-reports ../workspace/test"
-
- - name: Print remaining test logs
- shell: bash
- if: always() && steps.test.conclusion
- run: |
- cat test/**/*_toprint.log || true
-
- - name: Stop monitoring script
- if: always() && steps.monitor-script.outputs.monitor-script-pid
- shell: bash
- continue-on-error: true
- env:
- MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
- run: |
- kill "$MONITOR_SCRIPT_PID"
-
- - name: Upload test artifacts
- uses: ./.github/actions/upload-test-artifacts
- if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
- with:
- use-gha: true
- file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
-
- - name: Collect backtraces from coredumps (if any)
- if: always()
- run: |
- # shellcheck disable=SC2156
- find . -iname "core.[1-9]*" -exec docker exec "${CONTAINER_NAME}" sh -c "gdb python {} -ex 'bt' -ex 'q'" \;
-
- - name: Store Core dumps on GitHub
- uses: actions/upload-artifact@v3
- if: failure()
- with:
- name: coredumps-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}
- retention-days: 14
- if-no-files-found: ignore
- path: ./**/core.[1-9]*
-
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
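
The ROCm job above works around the jenkins user lacking write access to the mounted workspace by copying the checkout inside the container, running tests there, and copying the reports back. A condensed sketch of just that round-trip, using the same commands as the deleted steps:

```bash
# 1) Copy the read-only mounted workspace to a writable location and run tests there.
docker exec -t "${CONTAINER_NAME}" sh -c \
  "cd .. && cp -R workspace pytorch && cd pytorch && pip install dist/*.whl && ${TEST_COMMAND}"

# 2) Copy the test reports back to the mounted workspace so later steps can upload them.
docker exec -t "${CONTAINER_NAME}" sh -c \
  "cd ../pytorch && sudo cp -R test/test-reports ../workspace/test"
```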
diff --git a/.github/workflows/_run_android_tests.yml b/.github/workflows/_run_android_tests.yml
deleted file mode 100644
index b9b3d0645eac6..0000000000000
--- a/.github/workflows/_run_android_tests.yml
+++ /dev/null
@@ -1,107 +0,0 @@
-name: android-tests
-
-on:
- workflow_call:
- inputs:
- test-matrix:
- required: true
- type: string
- description: |
- A JSON description of what configs to run later on.
-
-defaults:
- run:
- shell: bash -e -l {0}
-
-jobs:
- filter:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
- keep-going: ${{ steps.filter.outputs.keep-going }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
-
- - name: Select all requested test configurations
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
-
- build-and-test:
- needs: filter
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch' && needs.filter.outputs.is-test-matrix-empty == 'False'
- strategy:
- matrix: ${{ fromJSON(needs.filter.outputs.test-matrix) }}
- fail-fast: false
- # NB: This job can only run on a GitHub-hosted Linux runner atm. This is ok though
- # because that runner is ephemeral and could access upload secrets
- runs-on: ${{ matrix.runner }}
- env:
- # GitHub runner installs Android SDK on this path
- ANDROID_ROOT: /usr/local/lib/android
- ANDROID_NDK_VERSION: '21.4.7075529'
- BUILD_LITE_INTERPRETER: ${{ matrix.use_lite_interpreter }}
- # 4 of them are supported atm: armeabi-v7a, arm64-v8a, x86, x86_64
- SUPPORT_ABI: '${{ matrix.support_abi }}'
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup miniconda
- uses: pytorch/test-infra/.github/actions/setup-miniconda@main
- with:
- python-version: 3.8
- environment-file: .github/requirements/conda-env-${{ runner.os }}-${{ runner.arch }}.txt
-
- - name: Install NDK
- uses: nick-fields/retry@v2.8.2
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- set -eux
-
- # Install NDK 21 after GitHub update
- # https://github.com/actions/virtual-environments/issues/5595
- ANDROID_SDK_ROOT="${ANDROID_ROOT}/sdk"
- ANDROID_NDK="${ANDROID_SDK_ROOT}/ndk-bundle"
-
- SDKMANAGER="${ANDROID_SDK_ROOT}/cmdline-tools/latest/bin/sdkmanager"
- # NB: This step downloads and installs the NDK, so it could be flaky.
- # However, SDKMANAGER doesn't return a non-zero status code when that
- # happens, even though the corrupted file it downloaded isn't a valid
- # ZIP archive and couldn't be extracted
- echo "y" | ${SDKMANAGER} "ndk;${ANDROID_NDK_VERSION}"
-
- ln -sfn "${ANDROID_SDK_ROOT}/ndk/${ANDROID_NDK_VERSION}" "${ANDROID_NDK}"
- # So, we need to manually verify the existence of the NDK afterward
- # and return a failure if the file isn't there
- if [ ! -f "${ANDROID_NDK}/build/cmake/android.toolchain.cmake" ]; then
- exit 1
- fi
-
- echo "ANDROID_SDK_ROOT=${ANDROID_SDK_ROOT}" >> "${GITHUB_ENV}"
- echo "ANDROID_NDK=${ANDROID_NDK}" >> "${GITHUB_ENV}"
-
- - name: Build PyTorch Android
- run: |
- set -eux
-
- echo "CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname "$(which conda)")/../"}" >> "${GITHUB_ENV}"
- ${CONDA_RUN} ./scripts/build_pytorch_android.sh "${SUPPORT_ABI}"
-
- - name: Run tests
- uses: reactivecircus/android-emulator-runner@v2
- with:
- api-level: 25
- script: ./android/run_tests.sh
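
Because sdkmanager can exit 0 even when its NDK download is corrupted, the install step above verifies the result by checking for a known file. A trimmed sketch of that verify-after-install pattern, reusing the variables defined in the deleted step:

```bash
# sdkmanager's exit code is not trustworthy here, so check a marker file afterwards.
echo "y" | "${SDKMANAGER}" "ndk;${ANDROID_NDK_VERSION}"
ln -sfn "${ANDROID_SDK_ROOT}/ndk/${ANDROID_NDK_VERSION}" "${ANDROID_NDK}"

if [ ! -f "${ANDROID_NDK}/build/cmake/android.toolchain.cmake" ]; then
  echo "NDK install appears corrupted; failing so the retry wrapper can try again"
  exit 1
fi
```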
diff --git a/.github/workflows/_runner-determinator.yml b/.github/workflows/_runner-determinator.yml
deleted file mode 100644
index 861f25a7bca3e..0000000000000
--- a/.github/workflows/_runner-determinator.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-name: Check whether the workflow owner can use ARC runners
-
-on:
- workflow_call:
- inputs:
- user_name:
- required: true
- type: string
- description: The name of the workflow owner.
- curr_branch:
- required: true
- type: string
- description: Current branch.
- issue_number:
- required: false
- type: string
- default: "5132"
-
- outputs:
- workflow-type:
- description: Type of runners to use
- value: ${{ jobs.runner-determinator.outputs.workflow-type }}
-
-jobs:
- runner-determinator:
- runs-on: linux.4xlarge
- outputs:
- workflow-type: ${{ steps.set-condition.outputs.workflow-type }}
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- ISSUE_NUMBER: ${{ inputs.issue_number }}
- USERNAME: ${{ inputs.user_name }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: true
-
- - name: Install dependencies
- run: python3 -m pip install urllib3==1.26.18 PyGithub==2.3.0
-
- - name: Get the workflow type for the current user
- id: set-condition
- run: |
- curr_branch="${{ inputs.curr_branch }}"
- echo "Current branch is '$curr_branch'"
-
- output="$(python3 .github/scripts/get_workflow_type.py \
- --github-token "$GITHUB_TOKEN" \
- --github-issue "$ISSUE_NUMBER" \
- --github-branch "$curr_branch" \
- --github-user "$USERNAME")"
-
- echo "Output: '${output}'"
-
- WORKFLOW_TYPE=$(echo "${output}" | jq -r '.workflow_type')
- echo "workflow-type=$WORKFLOW_TYPE" >> "$GITHUB_OUTPUT"
diff --git a/.github/workflows/_win-build.yml b/.github/workflows/_win-build.yml
deleted file mode 100644
index bc381c50628d1..0000000000000
--- a/.github/workflows/_win-build.yml
+++ /dev/null
@@ -1,171 +0,0 @@
-name: windows-build
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- cuda-version:
- required: true
- type: string
- description: What CUDA version to build with, "cpu" for none.
- build-with-debug:
- required: false
- type: boolean
- default: false
- description: If set, build in debug mode.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- test-matrix:
- required: false
- type: string
- description: |
- An optional JSON description of what test configs to run later on. This
- is moved here from the Linux test workflow so that we can apply filter
- logic using test-config labels earlier and skip unnecessary builds
-
- outputs:
- test-matrix:
- value: ${{ jobs.build.outputs.test-matrix }}
- description: An optional JSON description of what test configs to run later on.
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- build:
- # Don't run on forked repos.
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, windows.4xlarge.nonephemeral]
- timeout-minutes: 240
- outputs:
- test-matrix: ${{ steps.filter.outputs.test-matrix }}
- steps:
- # Duplicated in win-test because this MUST go before a checkout
- - name: Enable git symlinks on Windows and disable fsmonitor daemon
- shell: bash
- run: |
- git config --global core.symlinks true
-
- # https://git-scm.com/docs/git-fsmonitor--daemon. The daemon could lock
- # the directory on Windows and prevent GHA from checking out as reported
- # in https://github.com/actions/checkout/issues/1018
- git config --global core.fsmonitor false
-
- - name: Clean up leftover processes on non-ephemeral Windows runner
- uses: pytorch/test-infra/.github/actions/cleanup-runner@main
-
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- instructions: |
- To forward remote desktop to your local machine, ssh as follows:
- ssh -L 3389:localhost:3389 %%username%%@%%hostname%%
- And then change password using `passwd` command.
-
- To start the build locally, change the working folder to \actions-runner\_work\pytorch\pytorch,
- then activate the miniconda and Visual Studio environment by running:
- call C:\Jenkins\Miniconda3\Scripts\activate.bat C:\Jenkins\Miniconda3
- call "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- no-sudo: true
-
- - name: Setup Windows
- uses: ./.github/actions/setup-win
- with:
- cuda-version: ${{ inputs.cuda-version }}
-
- - name: Parse ref
- id: parse-ref
- run: python3 .github/scripts/parse_ref.py
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- # Apply the filter logic to the build step too if the test-config label is already there
- - name: Select all requested test configurations (if the test matrix is available)
- id: filter
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Download pytest cache
- uses: ./.github/actions/pytest-cache-download
- continue-on-error: true
- with:
- cache_dir: .pytest_cache
- job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
-
- - name: Build
- if: steps.filter.outputs.is-test-matrix-empty == 'False' || inputs.test-matrix == ''
- id: build
- shell: bash
- env:
- PYTORCH_FINAL_PACKAGE_DIR: /c/${{ github.run_id }}/build-results/
- BRANCH: ${{ steps.parse-ref.outputs.branch }}
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- BUILD_WHEEL: 1
- MAX_JOBS: 8
- CUDA_VERSION: ${{ inputs.cuda-version }}
- PYTHON_VERSION: "3.8"
- SCCACHE_BUCKET: "ossci-compiler-cache"
- SCCACHE_S3_KEY_PREFIX: ${{ github.workflow }}
- SCCACHE_REGION: us-east-1
- VC_PRODUCT: "BuildTools"
- VC_VERSION: ""
- VC_YEAR: "2019"
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- AWS_DEFAULT_REGION: us-east-1
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- DEBUG: ${{ inputs.build-with-debug && '1' || '0' }}
- TORCH_CUDA_ARCH_LIST: "8.6"
- USE_CUDA: ${{ inputs.cuda-version != 'cpu' && '1' || '0' }}
- OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- run: |
- .ci/pytorch/win-build.sh
-
- # Upload to S3 so that people can click and download artifacts
- - name: Upload artifacts to s3
- if: steps.build.outcome != 'skipped'
- uses: seemethere/upload-artifact-s3@v5
- with:
- retention-days: 14
- if-no-files-found: error
- name: ${{ inputs.build-environment }}
- path: C:\${{ github.run_id }}\build-results
-
- - name: Upload sccache stats
- if: steps.build.outcome != 'skipped'
- uses: seemethere/upload-artifact-s3@v5
- with:
- s3-prefix: |
- ${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact
- retention-days: 14
- if-no-files-found: warn
- path: sccache-stats-*.json
-
- - name: Teardown Windows
- uses: ./.github/actions/teardown-win
- if: always()
- timeout-minutes: 120
- with:
- extra-delete-dir: /c/${{ github.run_id }}/build-results/
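
A minimal sketch of how a caller might use the reusable Windows build workflow deleted above. Only the input names come from its `workflow_call` block; the job name and values are placeholders:

```yaml
jobs:
  win-build:                                    # hypothetical job name
    uses: ./.github/workflows/_win-build.yml
    with:
      build-environment: win-placeholder-env    # placeholder label
      cuda-version: cpu                         # "cpu" disables CUDA per the input description
      build-with-debug: false
```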
diff --git a/.github/workflows/_win-test.yml b/.github/workflows/_win-test.yml
deleted file mode 100644
index 99d037f0355ce..0000000000000
--- a/.github/workflows/_win-test.yml
+++ /dev/null
@@ -1,241 +0,0 @@
-name: win-test
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- cuda-version:
- required: true
- type: string
- description: What CUDA version to build with, "cpu" for none.
- test-matrix:
- required: true
- type: string
- description: JSON description of what test configs to run.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- timeout-minutes:
- required: false
- type: number
- default: 240
- description: |
- Set the maximum time (in minutes) the workflow should take to finish
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- test:
- # Don't run on forked repos or empty test matrix
- if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
- strategy:
- matrix: ${{ fromJSON(inputs.test-matrix) }}
- fail-fast: false
- runs-on: ${{ matrix.runner }}
- timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- steps:
- # Duplicated in win-build because this MUST go before a checkout
- - name: Enable git symlinks on Windows and disable fsmonitor daemon
- shell: bash
- run: |
- git config --global core.symlinks true
-
- # https://git-scm.com/docs/git-fsmonitor--daemon. The daemon could lock
- # the directory on Windows and prevent GHA from checking out as reported
- # in https://github.com/actions/checkout/issues/1018
- git config --global core.fsmonitor false
-
- - name: Clean up leftover processes on non-ephemeral Windows runner
- uses: pytorch/test-infra/.github/actions/cleanup-runner@main
-
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- instructions: |
- To forward remote desktop to your local machine, ssh as follows:
- ssh -L 3389:localhost:3389 %%username%%@%%hostname%%
- And then change password using `passwd` command.
-
- To start tests locally, change the working folder to \actions-runner\_work\pytorch\pytorch\test,
- then activate the miniconda and Visual Studio environment and set PYTHONPATH by running:
- call C:\Jenkins\Miniconda3\Scripts\activate.bat C:\Jenkins\Miniconda3
- call "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64
- set PYTHONPATH=C:\actions-runner\_work\pytorch\pytorch\build\win_tmp\build
-
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- no-sudo: true
-
- - name: Setup Windows
- uses: ./.github/actions/setup-win
- with:
- cuda-version: ${{ inputs.cuda-version }}
-
- # TODO: Move to a requirements.txt file for windows
- - name: Install pip dependencies
- uses: nick-fields/retry@3e91a01664abd3c5cd539100d10d33b9c5b68482
- with:
- shell: bash
- timeout_minutes: 5
- max_attempts: 5
- retry_wait_seconds: 30
- command: |
- set -eu
- python3 -m pip install rockset==1.0.3 'xdoctest>=1.1.0'
-
- - name: Start monitoring script
- id: monitor-script
- shell: bash
- continue-on-error: true
- run: |
- # Windows conda doesn't have a python3 binary, only python, but it is Python 3
- ${CONDA_RUN} python -m tools.stats.monitor > usage_log.txt 2>&1 &
- echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
-
- - name: Download PyTorch Build Artifacts
- uses: seemethere/download-artifact-s3@v4
- with:
- name: ${{ inputs.build-environment }}
- path: C:\${{ github.run_id }}\build-results
-
- - name: Check build-results folder
- shell: powershell
- run: |
- tree /F C:\$Env:GITHUB_RUN_ID\build-results
-
- - name: Download TD artifacts
- continue-on-error: true
- uses: ./.github/actions/download-td-artifacts
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Check for keep-going label and re-enabled test issues
- # This uses the filter-test-configs action because it conveniently
- # checks for labels and re-enabled test issues. It does not actually do
- # any filtering. All filtering is done in the build step.
- id: keep-going
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Set Test step time
- id: test-timeout
- shell: bash
- env:
- JOB_TIMEOUT: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- run: |
- echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"
-
- - name: Test
- id: test
- shell: bash
- timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
- env:
- USE_CUDA: ${{ inputs.cuda-version != 'cpu' && '1' || '0' }}
- INSTALL_WINDOWS_SDK: 1
- PYTHON_VERSION: 3.8
- CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
- VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
- NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
- NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
- VC_PRODUCT: "BuildTools"
- VC_VERSION: ""
- VS_VERSION: "16.8.6"
- VC_YEAR: "2019"
- AWS_DEFAULT_REGION: us-east-1
- PR_NUMBER: ${{ github.event.pull_request.number }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_WORKFLOW: ${{ github.workflow }}
- GITHUB_JOB: ${{ github.job }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- GITHUB_RUN_NUMBER: ${{ github.run_number }}
- GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
- JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- CUDA_VERSION: ${{ inputs.cuda-version }}
- PYTORCH_FINAL_PACKAGE_DIR: /c/${{ github.run_id }}/build-results/
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- SHARD_NUMBER: ${{ matrix.shard }}
- NUM_TEST_SHARDS: ${{ matrix.num_shards }}
- TEST_CONFIG: ${{ matrix.config }}
- REENABLED_ISSUES: ${{ github.event.pull_request.reenabled-issues }}
- TORCH_CUDA_ARCH_LIST: "8.6"
- PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
- PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
- run: |
- pushd "${PYTORCH_FINAL_PACKAGE_DIR}"
- # shellcheck disable=SC2046,SC2102
- python3 -mpip install $(echo *.whl)[opt-einsum,optree]
- popd
-
- .ci/pytorch/win-test.sh
-
- - name: Upload pytest cache if tests failed
- uses: ./.github/actions/pytest-cache-upload
- continue-on-error: true
- if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure'
- with:
- cache_dir: .pytest_cache
- shard: ${{ matrix.shard }}
- sha: ${{ github.event.pull_request.head.sha || github.sha }}
- test_config: ${{ matrix.config }}
- job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
-
- - name: Print remaining test logs
- shell: bash
- if: always() && steps.test.conclusion
- run: |
- cat test/**/*_toprint.log || true
-
- - name: Stop monitoring script
- if: always() && steps.monitor-script.outputs.monitor-script-pid
- shell: bash
- continue-on-error: true
- env:
- MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
- run: |
- kill "$MONITOR_SCRIPT_PID"
-
- - name: Upload test artifacts
- uses: ./.github/actions/upload-test-artifacts
- if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
- with:
- file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
-
- - name: Parse ref
- id: parse-ref
- run: python3 .github/scripts/parse_ref.py
-
- - name: Uninstall PyTorch
- if: always()
- continue-on-error: true
- shell: bash
- run: |
- # This step removes the PyTorch installed by the test to give a clean slate
- # to the next job
- python3 -mpip uninstall -y torch
-
- - name: Teardown Windows
- uses: ./.github/actions/teardown-win
- if: always()
- timeout-minutes: 120
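
The Windows test step above installs the freshly built wheel together with optional extras by globbing the wheel filename. A short sketch of just that install, using the same directory variable as the deleted step:

```bash
pushd "${PYTORCH_FINAL_PACKAGE_DIR}"
# $(echo *.whl) expands to the single built wheel; the [...] suffix asks pip to
# also install the optional extras that the wheel declares.
# shellcheck disable=SC2046,SC2102
python3 -mpip install $(echo *.whl)[opt-einsum,optree]
popd
```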
diff --git a/.github/workflows/_xpu-test.yml b/.github/workflows/_xpu-test.yml
deleted file mode 100644
index d7af711f8adb4..0000000000000
--- a/.github/workflows/_xpu-test.yml
+++ /dev/null
@@ -1,271 +0,0 @@
-# TODO: this looks sort of similar to _linux-test, but there are like a dozen
-# places where you would have to insert an if statement. Probably it's better to
-# just use a different workflow altogether
-
-name: xpu-test
-
-on:
- workflow_call:
- inputs:
- build-environment:
- required: true
- type: string
- description: Top-level label for what's being built/tested.
- test-matrix:
- required: true
- type: string
- description: JSON description of what test configs to run.
- docker-image:
- required: true
- type: string
- description: Docker image to run in.
- sync-tag:
- required: false
- type: string
- default: ""
- description: |
- If this is set, our linter will use this to make sure that every other
- job with the same `sync-tag` is identical.
- timeout-minutes:
- required: false
- type: number
- default: 300
- description: |
- Set the maximum time (in minutes) the workflow should take to finish
- tests-to-include:
- required: false
- type: string
- default: ""
- description: |
- List of tests to include (empty string implies default list)
-
-env:
- GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
-
-jobs:
- test:
- # Don't run on forked repos or empty test matrix
- if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
- strategy:
- matrix: ${{ fromJSON(inputs.test-matrix) }}
- fail-fast: false
- timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- runs-on: ${{ matrix.runner }}
- steps:
- # [see note: pytorch repo ref]
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup XPU
- uses: ./.github/actions/setup-xpu
-
- - name: configure aws credentials
- id: aws_creds
- uses: aws-actions/configure-aws-credentials@v1.7.0
- with:
- role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
- aws-region: us-east-1
-
- - name: Login to Amazon ECR
- id: login-ecr
- uses: aws-actions/amazon-ecr-login@v2
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ inputs.docker-image }}
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Start monitoring script
- id: monitor-script
- shell: bash
- continue-on-error: true
- run: |
- python3 -m pip install psutil==5.9.1 nvidia-ml-py==11.525.84
- python3 -m tools.stats.monitor > usage_log.txt 2>&1 &
- echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
-
- - name: Download build artifacts
- uses: ./.github/actions/download-build-artifacts
- with:
- name: ${{ inputs.build-environment }}
-
- - name: Parse ref
- id: parse-ref
- run: .github/scripts/parse_ref.py
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Check for keep-going label and re-enabled test issues
- # This uses the filter-test-configs action because it conveniently
- # checks for labels and re-enabled test issues. It does not actually do
- # any filtering. All filtering is done in the build step.
- id: keep-going
- uses: ./.github/actions/filter-test-configs
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- test-matrix: ${{ inputs.test-matrix }}
- job-name: ${{ steps.get-job-id.outputs.job-name }}
-
- - name: Set Test step time
- id: test-timeout
- shell: bash
- env:
- JOB_TIMEOUT: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
- run: |
- echo "timeout=$((JOB_TIMEOUT-30))" >> "${GITHUB_OUTPUT}"
-
- - name: Test
- id: test
- env:
- BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_WORKFLOW: ${{ github.workflow }}
- GITHUB_JOB: ${{ github.job }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- GITHUB_RUN_NUMBER: ${{ github.run_number }}
- GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
- JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
- BRANCH: ${{ steps.parse-ref.outputs.branch }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- PYTORCH_RETRY_TEST_CASES: 1
- PYTORCH_OVERRIDE_FLAKY_SIGNAL: 1
- CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
- VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
- NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
- NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
- TEST_CONFIG: ${{ matrix.config }}
- SHARD_NUMBER: ${{ matrix.shard }}
- NUM_TEST_SHARDS: ${{ matrix.num_shards }}
- REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
- SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
- DOCKER_IMAGE: ${{ inputs.docker-image }}
- XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
- PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
- PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
- TESTS_TO_INCLUDE: ${{ inputs.tests-to-include }}
- timeout-minutes: ${{ fromJson(steps.test-timeout.outputs.timeout) }}
- run: |
- set -x
-
- TEST_COMMAND=.ci/pytorch/test.sh
-
- # detached container should get cleaned up by teardown_ec2_linux
- # Used for GPU_FLAG since that doesn't play nice
- # shellcheck disable=SC2086,SC2090
- container_name=$(docker run \
- ${GPU_FLAG:-} \
- -e BUILD_ENVIRONMENT \
- -e PR_NUMBER \
- -e GITHUB_ACTIONS \
- -e GITHUB_REPOSITORY \
- -e GITHUB_WORKFLOW \
- -e GITHUB_JOB \
- -e GITHUB_RUN_ID \
- -e GITHUB_RUN_NUMBER \
- -e GITHUB_RUN_ATTEMPT \
- -e JOB_ID \
- -e BRANCH \
- -e SHA1 \
- -e AWS_DEFAULT_REGION \
- -e IN_WHEEL_TEST \
- -e SHARD_NUMBER \
- -e TEST_CONFIG \
- -e NUM_TEST_SHARDS \
- -e REENABLED_ISSUES \
- -e PYTORCH_RETRY_TEST_CASES \
- -e PYTORCH_OVERRIDE_FLAKY_SIGNAL \
- -e CONTINUE_THROUGH_ERROR \
- -e VERBOSE_TEST_LOGS \
- -e NO_TEST_TIMEOUT \
- -e NO_TD \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e SCCACHE_BUCKET \
- -e XLA_CLANG_CACHE_S3_BUCKET_NAME \
- -e PYTORCH_TEST_CUDA_MEM_LEAK_CHECK \
- -e PYTORCH_TEST_RERUN_DISABLED_TESTS \
- -e TESTS_TO_INCLUDE \
- -e ZE_AFFINITY_MASK \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --ulimit stack=10485760:83886080 \
- --ulimit core=0 \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --shm-size="8g" \
- --tty \
- --detach \
- --name="${container_name}" \
- --user jenkins \
- --privileged \
- -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
- -w /var/lib/jenkins/workspace \
- "${DOCKER_IMAGE}"
- )
- # save container name for later step
- echo "CONTAINER_NAME=${container_name}" >> "$GITHUB_ENV"
- # jenkins user does not have write permission to mounted workspace; work-around by copying within container to jenkins home
- docker exec -t "${container_name}" sh -c "cd .. && cp -R workspace pytorch && cd pytorch && pip install dist/*.whl && ${TEST_COMMAND}"
-
- - name: Save test results
- if: always()
- run: |
- # copy test results back to the mounted workspace; sudo is needed and the resulting permissions are correct
- docker exec -t "${{ env.CONTAINER_NAME }}" sh -c "cd ../pytorch && sudo cp -R test/test-reports ../workspace/test"
-
- - name: Print remaining test logs
- shell: bash
- if: always() && steps.test.conclusion
- run: |
- cat test/**/*_toprint.log || true
-
- - name: Stop monitoring script
- if: always() && steps.monitor-script.outputs.monitor-script-pid
- shell: bash
- continue-on-error: true
- env:
- MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
- run: |
- kill "$MONITOR_SCRIPT_PID"
-
- - name: Upload test artifacts
- uses: ./.github/actions/upload-test-artifacts
- if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
- with:
- use-gha: true
- file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
-
- - name: Collect backtraces from coredumps (if any)
- if: always()
- run: |
- # shellcheck disable=SC2156
- find . -iname "core.[1-9]*" -exec docker exec "${CONTAINER_NAME}" sh -c "gdb python {} -ex 'bt' -ex 'q'" \;
-
- - name: Stop container before exit
- if: always()
- run: |
- # Workaround for multiple runners on same IDC node
- docker stop "${{ env.CONTAINER_NAME }}"
-
- - name: Store Core dumps on GitHub
- uses: actions/upload-artifact@v3
- if: failure()
- with:
- name: coredumps-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}
- retention-days: 14
- if-no-files-found: ignore
- path: ./**/core.[1-9]*
-
- - name: Teardown XPU
- uses: ./.github/actions/teardown-xpu
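
The removed XPU test workflow above drives its Test step through a detached-container pattern: one long-lived container is started with `docker run --detach`, and every later step (running the tests, copying reports, collecting core dumps, stopping the container) addresses that same container again via `docker exec` / `docker stop`. A minimal, self-contained sketch of that pattern follows; the image and commands are placeholders, not the workflow's real test script:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Start one long-lived, detached container; the captured ID is what later
# steps use to address the same container again. (ubuntu:22.04 and the
# commands below are placeholders, not the real PyTorch test invocation.)
container_id=$(docker run --tty --detach ubuntu:22.04)

# Run the workload inside the running container.
docker exec -t "${container_id}" sh -c "echo 'running tests...'"

# Follow-up steps keep targeting the same container, e.g. to copy results.
docker exec -t "${container_id}" sh -c "ls /tmp"

# A final cleanup step stops and removes the container.
docker stop "${container_id}"
docker rm "${container_id}"
```
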
diff --git a/.github/workflows/assigntome-docathon.yml b/.github/workflows/assigntome-docathon.yml
deleted file mode 100644
index 4948faacd6859..0000000000000
--- a/.github/workflows/assigntome-docathon.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-name: Assign User on Comment
-
-on:
- workflow_dispatch:
- issue_comment:
- types: [created]
-
-jobs:
- assign:
- runs-on: ubuntu-latest
- steps:
- - name: Check for "/assigntome" in comment
- uses: actions/github-script@v6
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- with:
- script: |
- const issueComment = context.payload.comment.body;
- const assignRegex = /\/assigntome/i;
- if (assignRegex.test(issueComment)) {
- const assignee = context.payload.comment.user.login;
- const issueNumber = context.payload.issue.number;
- try {
- const { data: issue } = await github.rest.issues.get({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: issueNumber
- });
- const hasLabel = issue.labels.some(label => label.name === 'docathon-h2-2023');
- if (hasLabel) {
- if (issue.assignee !== null) {
- await github.rest.issues.createComment({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: issueNumber,
- body: "The issue is already assigned. Please pick an open and unassigned issue with the [docathon-h2-2023 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h2-2023)."
- });
- } else {
- await github.rest.issues.addAssignees({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: issueNumber,
- assignees: [assignee]
- });
- }
- } else {
- const commentMessage = "This issue does not have the correct label. Please pick an open and unassigned issue with the [docathon-h2-2023 label](https://github.com/pytorch/pytorch/issues?q=is%3Aopen+is%3Aissue+label%3Adocathon-h2-2023)."
- await github.rest.issues.createComment({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: issueNumber,
- body: commentMessage
- });
- }
- } catch (error) {
- console.error(error);
- }
- }
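
The removed docathon workflow above assigns the commenter only when the issue both carries the `docathon-h2-2023` label and has no assignee yet. As a rough, hedged illustration of those same checks outside `actions/github-script`, here is a sketch using the GitHub CLI; the repository, issue number, and label are placeholders:

```bash
#!/usr/bin/env bash
set -euo pipefail

REPO="pytorch/pytorch"       # placeholder repository
ISSUE=12345                  # placeholder issue number
LABEL="docathon-h2-2023"     # label the issue must carry

# Read the issue's labels and count its current assignees.
labels=$(gh issue view "$ISSUE" -R "$REPO" --json labels --jq '.labels[].name')
assignee_count=$(gh issue view "$ISSUE" -R "$REPO" --json assignees --jq '.assignees | length')

if ! grep -qx "$LABEL" <<< "$labels"; then
  echo "Issue lacks the ${LABEL} label; not assigning."
elif [ "$assignee_count" -gt 0 ]; then
  echo "Issue is already assigned; pick another one."
else
  # Assign the authenticated user, mirroring the self-assignment behaviour.
  gh issue edit "$ISSUE" -R "$REPO" --add-assignee "@me"
fi
```
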
diff --git a/.github/workflows/auto_request_review.yml b/.github/workflows/auto_request_review.yml
deleted file mode 100644
index 25eb72bc2faab..0000000000000
--- a/.github/workflows/auto_request_review.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-name: Auto Request Review
-
-on:
- pull_request:
- types: [opened, ready_for_review, reopened]
-jobs:
- auto-request-review:
- # Don't run on forked repos
- if: ${{ !github.event.pull_request.head.repo.fork }}
- permissions:
- contents: read
- pull-requests: write
- name: Auto Request Review
- runs-on: ubuntu-latest
- steps:
- - name: Request review based on files changes and/or groups the author belongs to
- # v0.7.0
- uses: necojackarc/auto-request-review@e08cdffa277d50854744de3f76230260e61c67f4
- with:
- token: ${{ secrets.GITHUB_TOKEN }}
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
diff --git a/.github/workflows/build-android-binaries.yml b/.github/workflows/build-android-binaries.yml
deleted file mode 100644
index 7bf7865227951..0000000000000
--- a/.github/workflows/build-android-binaries.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-name: Build Android binaries
-
-on:
- push:
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- paths:
- - .github/workflows/build-android-binaries.yml
- - .github/workflows/_run_android_tests.yml
- - android/**
- pull_request:
- paths:
- - .github/workflows/build-android-binaries.yml
- - .github/workflows/_run_android_tests.yml
- - android/**
- # NB: We can use this workflow dispatch to test and build the binaries manually
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- android-build-test:
- name: android-build-test
- uses: ./.github/workflows/_run_android_tests.yml
- with:
- test-matrix: |
- { include: [
- { config: 'default',
- shard: 1,
- num_shards: 1,
- runner: 'ubuntu-20.04-16x',
- use_lite_interpreter: 1,
- support_abi: 'armeabi-v7a,arm64-v8a,x86,x86_64',
- },
- { config: 'default',
- shard: 1,
- num_shards: 1,
- runner: 'ubuntu-20.04-16x',
- use_lite_interpreter: 0,
- support_abi: 'armeabi-v7a,arm64-v8a,x86,x86_64',
- },
- ]}
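
The removed Android binaries workflow above hands its test matrix to the reusable `_run_android_tests.yml` workflow as a JSON string; on the consuming side such matrices are gated with an expression like `toJSON(fromJSON(inputs.test-matrix).include) != '[]'` so an empty `include` list never schedules runners. A small sketch of the same emptiness check done locally with `jq`, using a placeholder matrix:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Placeholder matrix in the same shape the workflows pass around.
test_matrix='{ "include": [ { "config": "default", "shard": 1, "num_shards": 1 } ] }'

# Count the entries under .include; zero means there is nothing to run.
entries=$(jq '.include | length' <<< "$test_matrix")

if [ "$entries" -eq 0 ]; then
  echo "Empty test matrix; nothing to schedule."
else
  echo "Would schedule ${entries} matrix job(s)."
fi
```
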
diff --git a/.github/workflows/build-ios-binaries.yml b/.github/workflows/build-ios-binaries.yml
deleted file mode 100644
index 3f3be84f48bdf..0000000000000
--- a/.github/workflows/build-ios-binaries.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-name: Build iOS binaries
-
-on:
- push:
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- paths:
- - .github/workflows/build-ios-binaries.yml
- - .github/workflows/_ios-build-test.yml
- pull_request:
- paths:
- - .github/workflows/build-ios-binaries.yml
- - .github/workflows/_ios-build-test.yml
- # NB: We can use this workflow dispatch to test and build iOS binaries manually
- workflow_dispatch:
- inputs:
- use_lite_interpreter:
- description: "Use PyTorch lite interpreter?"
- type: string
- default: 1
- use_coreml:
- description: "Use Apple Core ML?"
- type: string
- default: 1
- use_custom_op_list:
- description: "Specify the custom ops list to include in the binaries"
- type: string
- default: ""
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- # TODO: Figure out how to migrate this job to M1 runner
- ios-build-test:
- name: ios-build-test
- uses: ./.github/workflows/_ios-build-test.yml
- with:
- trigger-event: ${{ github.event_name }}
- build-environment: ios-build-test
- sync-tag: ios-build-test
- test-matrix: |
- { include: [
- { config: "default",
- shard: 1,
- num_shards: 1,
- runner: "macos-13-xlarge",
- ios_platform: "SIMULATOR",
- ios_arch: "arm64",
- use_lite_interpreter: ${{ inputs.use_lite_interpreter || 1 }},
- use_metal: 0,
- use_coreml: ${{ inputs.use_coreml || 1 }},
- use_custom_op_list: ${{ inputs.use_custom_op_list || '' }}
- },
- { config: "default",
- shard: 1,
- num_shards: 1,
- runner: "macos-13-xlarge",
- ios_platform: "OS",
- ios_arch: "arm64",
- use_lite_interpreter: ${{ inputs.use_lite_interpreter || 1 }},
- use_metal: 1,
- use_coreml: ${{ inputs.use_coreml || 1 }},
- use_custom_op_list: ${{ inputs.use_custom_op_list || '' }}
- }
- ]}
- secrets:
- AWS_PYTORCH_MOBILE_UPLOADER_ACCESS_KEY_ID: ${{ secrets.AWS_PYTORCH_MOBILE_UPLOADER_ACCESS_KEY_ID }}
- AWS_PYTORCH_MOBILE_UPLOADER_SECRET_ACCESS_KEY: ${{ secrets.AWS_PYTORCH_MOBILE_UPLOADER_SECRET_ACCESS_KEY }}
diff --git a/.github/workflows/build-triton-wheel.yml b/.github/workflows/build-triton-wheel.yml
deleted file mode 100644
index ddba8ff8907cc..0000000000000
--- a/.github/workflows/build-triton-wheel.yml
+++ /dev/null
@@ -1,319 +0,0 @@
-name: Build Triton wheels
-
-on:
- push:
- branches:
- - main
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- paths:
- - .github/workflows/build-triton-wheel.yml
- - .github/scripts/build_triton_wheel.py
- - .github/ci_commit_pins/triton.txt
- - .ci/docker/ci_commit_pins/triton.txt
- - .ci/docker/ci_commit_pins/triton-rocm.txt
- pull_request:
- paths:
- - .github/workflows/build-triton-wheel.yml
- - .github/scripts/build_triton_wheel.py
- - .github/ci_commit_pins/triton.txt
- - .ci/docker/ci_commit_pins/triton.txt
- - .ci/docker/ci_commit_pins/triton-rocm.txt
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- build-wheel:
- name: "Build Triton Wheel"
- runs-on: [self-hosted, linux.2xlarge]
- strategy:
- fail-fast: false
- matrix:
- py_vers: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
- device: ["cuda", "rocm"]
- include:
- - device: "rocm"
- rocm_version: "6.1"
- - device: "cuda"
- rocm_version: ""
- timeout-minutes: 40
- env:
- DOCKER_IMAGE: ${{ matrix.device == 'rocm' && format('pytorch/manylinux-rocm:{0}', matrix.rocm_version) || 'pytorch/manylinux-builder:cpu' }}
- PY_VERS: ${{ matrix.py_vers }}
- BUILD_DEVICE: ${{ matrix.device }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ env.DOCKER_IMAGE }}
-
- - name: Build Triton wheel
- env:
- IS_RELEASE_TAG: ${{ startsWith(github.event.ref, 'refs/tags/v') }}
- run: |
- set -x
- mkdir -p "${RUNNER_TEMP}/artifacts/"
- container_name=$(docker run \
- --tty \
- --detach \
- -v "${GITHUB_WORKSPACE}:/pytorch" \
- -v "${RUNNER_TEMP}/artifacts:/artifacts" \
- -w /artifacts/ \
- "${DOCKER_IMAGE}" \
- )
-
- # Determine python executable for given version
- case $PY_VERS in
- 3.8)
- PYTHON_EXECUTABLE=/opt/python/cp38-cp38/bin/python
- ;;
- 3.9)
- PYTHON_EXECUTABLE=/opt/python/cp39-cp39/bin/python
- ;;
- 3.10)
- PYTHON_EXECUTABLE=/opt/python/cp310-cp310/bin/python
- ;;
- 3.11)
- PYTHON_EXECUTABLE=/opt/python/cp311-cp311/bin/python
- ;;
- 3.12)
- PYTHON_EXECUTABLE=/opt/python/cp312-cp312/bin/python
- ;;
- *)
- echo "Unsupported python version ${PY_VERS}"
- exit 1
- ;;
- esac
-
- BUILD_ROCM=""
- if [[ "$BUILD_DEVICE" == "rocm" ]]; then
- BUILD_ROCM="--build-rocm"
- fi
-
- RELEASE=""
- if [[ "${IS_RELEASE_TAG}" == true ]]; then
- RELEASE="--release"
- fi
-
- docker exec -t "${container_name}" yum install -y zlib-devel zip
- docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" -m pip install -U setuptools==67.4.0
- docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" /pytorch/.github/scripts/build_triton_wheel.py $BUILD_ROCM $RELEASE
- docker exec -t "${container_name}" chown -R 1000.1000 /artifacts
-
- - uses: actions/upload-artifact@v3
- with:
- name: pytorch-triton-wheel-${{ matrix.py_vers }}-${{ matrix.device }}
- if-no-files-found: error
- path: ${{ runner.temp }}/artifacts/*
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
-
- upload-wheel:
- runs-on: ubuntu-22.04
- needs: build-wheel
- permissions:
- id-token: write
- contents: read
- container:
- image: continuumio/miniconda3:4.12.0
- environment: ${{ (github.event_name == 'push' && (github.event.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v'))) && 'conda-aws-upload' || '' }}
- steps:
- - uses: actions/checkout@v3
-
- - name: Configure AWS credentials(PyTorch account) for main
- if: ${{ github.event_name == 'push' && github.event.ref == 'refs/heads/main' }}
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: arn:aws:iam::749337293305:role/gha_workflow_nightly_build_wheels
- aws-region: us-east-1
-
- - name: Configure AWS credentials(PyTorch account) for RC builds
- if: ${{ github.event_name == 'push' && (startsWith(github.event.ref, 'refs/tags/') && !startsWith(github.event.ref, 'refs/tags/ciflow/')) }}
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: arn:aws:iam::749337293305:role/gha_workflow_test_build_wheels
- aws-region: us-east-1
-
- - name: Download Build Artifacts
- uses: actions/download-artifact@v3
- with:
- # Download all available artifacts
- path: ${{ runner.temp }}/artifacts-all
-
- - name: Select Wheel Artifacts
- shell: bash
- run: |
- set -x
- mkdir -p "${RUNNER_TEMP}/artifacts/"
- mv "${RUNNER_TEMP}"/artifacts-all/pytorch-triton-wheel-*/* "${RUNNER_TEMP}/artifacts/"
-
- - name: Set DRY_RUN (only for tagged pushes)
- if: ${{ github.event_name == 'push' && (github.event.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) }}
- shell: bash
- run: |
- echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
-
- - name: Set UPLOAD_CHANNEL (only for tagged pushes)
- if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') }}
- shell: bash
- run: |
- set -ex
-
- # reference ends with an RC suffix
- if [[ "${GITHUB_REF_NAME}" = *-rc[0-9]* ]]; then
- echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
- fi
-
- # NB: This step is gated by DRY_RUN, which is enabled everywhere except main and release branches
- - name: Upload binaries
- env:
- PACKAGE_TYPE: wheel
- # The UPLOAD_SUBFOLDER needs to be empty here so that triton wheels are uploaded
- # to nightly or test
- UPLOAD_SUBFOLDER: ""
- PKG_DIR: ${{ runner.temp }}/artifacts
- shell: bash
- run: |
- set -ex
- bash .circleci/scripts/binary_upload.sh
-
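
The upload jobs in this removed Triton workflow derive their behaviour from the pushed ref: pushes to `main` or to release-candidate tags disable the dry run, and a tag name ending in an `-rcN` suffix switches the upload channel from `nightly` to `test`. A condensed sketch of that channel decision, assuming `GITHUB_REF_NAME` holds the value GitHub Actions would provide:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Placeholder default so the sketch also runs outside of GitHub Actions.
GITHUB_REF_NAME="${GITHUB_REF_NAME:-v2.4.0-rc1}"

UPLOAD_CHANNEL="nightly"
if [[ "${GITHUB_REF_NAME}" = *-rc[0-9]* ]]; then
  # Release-candidate tags are published to the test channel instead.
  UPLOAD_CHANNEL="test"
fi

echo "Uploading to channel: ${UPLOAD_CHANNEL}"
```
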
- build-conda:
- name: "Build Triton Conda"
- runs-on: [self-hosted, linux.2xlarge]
- strategy:
- fail-fast: false
- matrix:
- py_vers: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
- timeout-minutes: 40
- env:
- DOCKER_IMAGE: pytorch/conda-builder:cpu
- PY_VERS: ${{ matrix.py_vers }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ env.DOCKER_IMAGE }}
-
- - name: Build Triton conda package
- env:
- IS_RELEASE_TAG: ${{ startsWith(github.event.ref, 'refs/tags/v') }}
- run: |
- set -x
- mkdir -p "${RUNNER_TEMP}/artifacts/"
- container_name=$(docker run \
- --tty \
- --detach \
- -v "${GITHUB_WORKSPACE}:/pytorch" \
- -v "${RUNNER_TEMP}/artifacts:/artifacts" \
- -w /artifacts/ \
- "${DOCKER_IMAGE}" \
- )
-
- RELEASE=""
- if [[ "${IS_RELEASE_TAG}" == true ]]; then
- RELEASE="--release"
- fi
-
- docker exec -t "${container_name}" yum install -y llvm11 llvm11-devel llvm11-static llvm11-libs zlib-devel
- docker exec -t "${container_name}" python /pytorch/.github/scripts/build_triton_wheel.py --build-conda --py-version="${PY_VERS}" $RELEASE
- docker exec -t "${container_name}" chown -R 1000.1000 /artifacts
-
- - uses: actions/upload-artifact@v3
- with:
- name: pytorch-triton-conda-${{ matrix.py_vers }}
- if-no-files-found: error
- path: ${{ runner.temp }}/artifacts/*
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
-
- upload-conda:
- runs-on: ubuntu-22.04
- needs: build-conda
- container:
- image: continuumio/miniconda3:4.12.0
- environment: ${{ (github.event_name == 'push' && (github.event.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v'))) && 'conda-aws-upload' || '' }}
- steps:
- - uses: actions/checkout@v3
-
- - name: Download Build Artifacts
- uses: actions/download-artifact@v3
- with:
- # Download all available artifacts
- path: ${{ runner.temp }}/artifacts-all
-
- - name: Select Conda Artifacts
- shell: bash
- run: |
- set -x
- mkdir -p "${RUNNER_TEMP}/artifacts/"
- mv "${RUNNER_TEMP}"/artifacts-all/pytorch-triton-conda-*/* "${RUNNER_TEMP}/artifacts/"
-
- - name: Set DRY_RUN (only for tagged pushes)
- if: ${{ github.event_name == 'push' && (github.event.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) }}
- shell: bash
- run: |
- echo "DRY_RUN=disabled" >> "$GITHUB_ENV"
-
- - name: Set UPLOAD_CHANNEL (only for tagged pushes)
- if: ${{ github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') }}
- shell: bash
- run: |
- set -ex
-
- # reference ends with an RC suffix
- if [[ "${GITHUB_REF_NAME}" = *-rc[0-9]* ]]; then
- echo "UPLOAD_CHANNEL=test" >> "$GITHUB_ENV"
- fi
-
- # NB: This step is gated by DRY_RUN, which is enabled everywhere except nightly and release branches
- - name: Upload binaries to Anaconda
- env:
- PACKAGE_TYPE: conda
- PKG_DIR: ${{ runner.temp }}/artifacts
- # When running these on pull_request events these should be blank
- CONDA_PYTORCHBOT_TOKEN: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- CONDA_PYTORCHBOT_TOKEN_TEST: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- shell: bash
- run: |
- set -ex
-
- if [[ "${UPLOAD_CHANNEL:-nightly}" == "nightly" ]]; then
- export ANACONDA_API_TOKEN="${CONDA_PYTORCHBOT_TOKEN}"
- else
- export ANACONDA_API_TOKEN="${CONDA_PYTORCHBOT_TOKEN_TEST}"
- fi
- bash .circleci/scripts/binary_upload.sh
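
Both build jobs above locate the interpreter inside the builder images with a `case` statement over the requested Python version. Because the manylinux layout is uniform (`/opt/python/cpXY-cpXY/bin/python`), the same lookup can also be written as a short string transform; a hedged sketch, assuming only CPython versions written as `X.Y`:

```bash
#!/usr/bin/env bash
set -euo pipefail

PY_VERS="${1:-3.11}"            # e.g. "3.11"
tag="cp${PY_VERS//./}"          # "3.11" -> "cp311"
PYTHON_EXECUTABLE="/opt/python/${tag}-${tag}/bin/python"

# Fail with an explicit error when the interpreter is not present,
# mirroring the "Unsupported python version" branch above.
if [ ! -x "${PYTHON_EXECUTABLE}" ]; then
  echo "Unsupported python version ${PY_VERS}" >&2
  exit 1
fi
echo "${PYTHON_EXECUTABLE}"
```
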
diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml
deleted file mode 100644
index d638d588504f2..0000000000000
--- a/.github/workflows/check-labels.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Check Labels
-
-on:
- # We need pull_request_target to be able to post comments on PRs from forks.
- # Only allow pull_request_target when merging to main, not some historical branch.
- #
- # Make sure not to introduce explicit checking out and installing/running of
- # untrusted user code into this workflow!
- pull_request_target:
- types: [opened, synchronize, reopened, labeled, unlabeled]
- branches: [main]
-
- # To check labels on ghstack PRs.
- # Note: as pull_request doesn't trigger on PRs targeting main,
- # to test changes to the workflow itself one needs to create
- # a PR that targets a gh/**/base branch.
- pull_request:
- types: [opened, synchronize, reopened, labeled, unlabeled]
- branches: [gh/**/base]
-
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- check-labels:
- name: Check labels
- if: github.repository_owner == 'pytorch'
- runs-on: linux.20_04.4x
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
- fetch-depth: 1
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- architecture: x64
- check-latest: false
- cache: pip
- cache-dependency-path: |
- **/.github/requirements-gha-cache.txt
-
- - name: Install requirements
- id: requirements
- run: |
- pip install -r .github/requirements-gha-cache.txt --user
-
- - name: Check labels
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUM: ${{ github.event.number }}
- run: |
- set -ex
- python3 .github/scripts/check_labels.py "${PR_NUM}"
diff --git a/.github/workflows/check_mergeability_ghstack.yml b/.github/workflows/check_mergeability_ghstack.yml
deleted file mode 100644
index 562687564054f..0000000000000
--- a/.github/workflows/check_mergeability_ghstack.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-name: Check mergeability of ghstack PR
-
-on:
- pull_request:
- types: [opened, synchronize, reopened]
- branches: [gh/**/base]
-
-jobs:
- ghstack-mergeability-check:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Setup git
- shell: bash
- run: |
- git config --global user.email "pytorchmergebot@users.noreply.github.com"
- git config --global user.name "PyTorch MergeBot"
- git fetch origin main:main
-
- - name: Wait for orig branch
- shell: bash
- run: |
- BRANCH="${{ github.base_ref }}"
- echo "$BRANCH"
- BRANCH="${BRANCH%/base}/orig"
- echo "$BRANCH"
-
- WAIT_SECONDS=300
- END_WAIT=$((SECONDS+WAIT_SECONDS))
- BRANCH_EXISTS=0
-
- while [ $SECONDS -lt $END_WAIT ]; do
- git fetch --prune origin "${BRANCH}" || true
- if git rev-parse --verify "origin/${BRANCH}"; then
- BRANCH_EXISTS=1
- break
- fi
- echo "Waiting for branch ${BRANCH} to exist..."
- sleep 30 # Wait for 30 seconds before retrying
- done
-
- if [ $BRANCH_EXISTS -eq 0 ]; then
- echo "Branch ${BRANCH} not found after ${WAIT_SECONDS} seconds."
- echo "Mergeability check failed for infrastructure reasons."
- exit 1
- fi
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- cache: pip
- architecture: x64
-
- - run: pip install pyyaml==6.0 rockset==1.0.3
- shell: bash
-
- - name: Verify mergeability
- shell: bash
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUM: ${{ github.event.pull_request.number }}
- run: |
- set -ex
- python3 .github/scripts/trymerge.py --check-mergeability "${PR_NUM}"
-
- - name: Print debug info
- if: failure()
- shell: bash
- env:
- PR_NUM: ${{ github.event.pull_request.number }}
- run: |
- {
- echo "# PR $PR_NUM is not mergeable into main"
- echo "To debug, run the diagnostic workflow:"
- echo "https://github.com/pytorch/test-infra/actions/workflows/pr-dependencies-check.yml"
- } >> "$GITHUB_STEP_SUMMARY"
-
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
\ No newline at end of file
diff --git a/.github/workflows/cherry-pick.yml b/.github/workflows/cherry-pick.yml
deleted file mode 100644
index 059ad781d748d..0000000000000
--- a/.github/workflows/cherry-pick.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-name: Create a cherry pick from a PR
-
-on:
- repository_dispatch:
- types: [try-cherry-pick]
-
-jobs:
- cherry-pick:
- name: cherry-pick-pr-${{ github.event.client_payload.pr_num }}
- runs-on: ubuntu-latest
- environment: cherry-pick-bot
- env:
- GH_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- steps:
- - name: Checkout repo
- id: checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
- token: ${{ secrets.GH_PYTORCHBOT_CHERRY_PICK_TOKEN }}
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.11'
- cache: pip
-
- # Not the direct dependencies but the script uses trymerge
- - run: pip install pyyaml==6.0 rockset==1.0.3
-
- - name: Setup committer id
- run: |
- git config --global user.name "PyTorch Bot"
- git config --global user.email "pytorchbot@users.noreply.github.com"
-
- - name: Cherry pick the PR
- shell: bash
- env:
- PR_NUM: ${{ github.event.client_payload.pr_num }}
- BRANCH: ${{ github.event.client_payload.branch }}
- CLASSIFICATION: ${{ github.event.client_payload.classification }}
- FIXES: ${{ github.event.client_payload.fixes || '' }}
- ACTOR: ${{ github.actor }}
- GITHUB_TOKEN: ${{ secrets.GH_PYTORCHBOT_CHERRY_PICK_TOKEN }}
- run: |
- set -ex
-
- python .github/scripts/cherry_pick.py \
- --onto-branch "${BRANCH}" \
- --classification "${CLASSIFICATION}" \
- --fixes "${FIXES}" \
- --github-actor "${ACTOR}" \
- "${PR_NUM}"
-
-concurrency:
- group: cherry-pick-pr-${{ github.event.client_payload.pr_num }}
- cancel-in-progress: true
diff --git a/.github/workflows/close-nonexistent-disable-issues.yml b/.github/workflows/close-nonexistent-disable-issues.yml
deleted file mode 100644
index f384295b84b8a..0000000000000
--- a/.github/workflows/close-nonexistent-disable-issues.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-name: Close nonexistent disable issues
-
-on:
- schedule:
- - cron: 5 22 * * 5 # this should be about 3PM PT on Friday
-
-jobs:
- close-nonexistent-disable-issues:
- environment: rockset-read-only
- if: github.repository_owner == 'pytorch'
- runs-on: ubuntu-latest
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Run close_nonexistent_disable_issues.py
- env:
- ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- pip3 install requests==2.26
- pip3 install rockset==1.0.3
- python3 .github/scripts/close_nonexistent_disable_issues.py
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index df850f2523a89..5c4b38a9b3136 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -17,7 +17,7 @@ on:
pull_request:
branches: [ "main" ]
schedule:
- - cron: '40 12 * * 1'
+ - cron: '38 2 * * 0'
jobs:
analyze:
@@ -44,12 +44,14 @@ jobs:
fail-fast: false
matrix:
include:
- - language: c-cpp
- build-mode: autobuild
- - language: javascript-typescript
- build-mode: none
- - language: python
- build-mode: none
+ - language: java-kotlin
+ build-mode: none # This mode only analyzes Java. Set this to 'autobuild' or 'manual' to analyze Kotlin too.
+ - language: javascript-typescript
+ build-mode: none
+ - language: python
+ build-mode: none
+ - language: ruby
+ build-mode: none
# CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
@@ -59,39 +61,39 @@ jobs:
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- - name: Checkout repository
- uses: actions/checkout@v4
+ - name: Checkout repository
+ uses: actions/checkout@v4
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v3
- with:
- languages: ${{ matrix.language }}
- build-mode: ${{ matrix.build-mode }}
- # If you wish to specify custom queries, you can do so here or in a config file.
- # By default, queries listed here will override any specified in a config file.
- # Prefix the list here with "+" to use these queries and those in the config file.
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v3
+ with:
+ languages: ${{ matrix.language }}
+ build-mode: ${{ matrix.build-mode }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
- # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
- # queries: security-extended,security-and-quality
+ # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
+ # queries: security-extended,security-and-quality
- # If the analyze step fails for one of the languages you are analyzing with
- # "We were unable to automatically build your code", modify the matrix above
- # to set the build mode to "manual" for that language. Then modify this step
- # to build your code.
- # ℹ️ Command-line programs to run using the OS shell.
- # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- - if: matrix.build-mode == 'manual'
- shell: bash
- run: |
- echo 'If you are using a "manual" build mode for one or more of the' \
- 'languages you are analyzing, replace this with the commands to build' \
- 'your code, for example:'
- echo ' make bootstrap'
- echo ' make release'
- exit 1
+ # If the analyze step fails for one of the languages you are analyzing with
+ # "We were unable to automatically build your code", modify the matrix above
+ # to set the build mode to "manual" for that language. Then modify this step
+ # to build your code.
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
+ - if: matrix.build-mode == 'manual'
+ shell: bash
+ run: |
+ echo 'If you are using a "manual" build mode for one or more of the' \
+ 'languages you are analyzing, replace this with the commands to build' \
+ 'your code, for example:'
+ echo ' make bootstrap'
+ echo ' make release'
+ exit 1
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v3
- with:
- category: "/language:${{matrix.language}}"
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v3
+ with:
+ category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
deleted file mode 100644
index c80b61c22c5e7..0000000000000
--- a/.github/workflows/create_release.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: Create Release
-
-on:
- push:
- branches:
- - main
- - release/*
- release:
- types: [published]
- pull_request:
- paths: [.github/workflows/create_release.yml]
-
-jobs:
- release:
- if: ${{ github.repository == 'pytorch/pytorch' }}
- name: Create Release
- runs-on: ubuntu-latest
- # https://github.com/softprops/action-gh-release?tab=readme-ov-file#permissions
- permissions:
- contents: write
- steps:
- - uses: malfet/checkout@silent-checkout
- with:
- submodules: 'recursive'
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- - name: Fake name for PRs
- if: ${{ github.event_name == 'pull_request' }}
- run: echo "PT_GITHUB_REF=refs/tags/pr-tag" >> "$GITHUB_ENV"
- - name: Real name for non-PRs
- if: ${{ github.event_name != 'pull_request' }}
- run: echo "PT_GITHUB_REF=$GITHUB_REF" >> "$GITHUB_ENV"
- - name: Set filenames
- run: |
- tag_or_branch="${PT_GITHUB_REF#refs/tags/}"
- tag_or_branch="${tag_or_branch#refs/heads/}"
- # replace directory separators with _ in branch name
- tag_or_branch="${tag_or_branch//\//_}"
- echo "PT_RELEASE_NAME=pytorch-$tag_or_branch" >> "$GITHUB_ENV"
- echo "PT_RELEASE_FILE=pytorch-$tag_or_branch.tar.gz" >> "$GITHUB_ENV"
- - name: Create source distribution
- run: |
- # Create a new folder with the specified name so that extracting the archive yields that name
- rm -rf "/tmp/$PT_RELEASE_NAME"
- cp -r "$PWD" "/tmp/$PT_RELEASE_NAME"
- mv "/tmp/$PT_RELEASE_NAME" .
- # Cleanup
- rm -rf "$PT_RELEASE_NAME"/{.circleci,.ci}
- find "$PT_RELEASE_NAME" -name '.git*' -exec rm -rv {} \; || true
- # Create archive
- tar -czf "$PT_RELEASE_FILE" "$PT_RELEASE_NAME"
- echo "Created source archive $PT_RELEASE_FILE with content: $(ls -a "$PT_RELEASE_NAME")"
- - name: Upload source distribution
- if: ${{ github.event_name == 'release' }}
- uses: softprops/action-gh-release@v1
- with:
- files: ${{env.PT_RELEASE_FILE}}
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
- cancel-in-progress: true
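
The removed Create Release workflow above derives its archive name by stripping the `refs/tags/` or `refs/heads/` prefix from the ref and replacing directory separators with underscores. A short worked sketch of that derivation for two representative (purely illustrative) refs:

```bash
#!/usr/bin/env bash
set -euo pipefail

for ref in "refs/tags/v2.3.0" "refs/heads/release/2.3"; do
  tag_or_branch="${ref#refs/tags/}"
  tag_or_branch="${tag_or_branch#refs/heads/}"
  tag_or_branch="${tag_or_branch//\//_}"   # directory separators -> underscores
  echo "${ref} -> pytorch-${tag_or_branch}.tar.gz"
done
# Prints:
#   refs/tags/v2.3.0 -> pytorch-v2.3.0.tar.gz
#   refs/heads/release/2.3 -> pytorch-release_2.3.tar.gz
```
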
diff --git a/.github/workflows/delete_old_branches.yml b/.github/workflows/delete_old_branches.yml
deleted file mode 100644
index 04a0521419a8e..0000000000000
--- a/.github/workflows/delete_old_branches.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# A workflow that deletes branches of closed PRs
-
-name: Delete old branches
-
-on:
- schedule:
- # Run daily.
- - cron: 30 1 * * *
- workflow_dispatch:
-
-concurrency:
- group: delete-old-branches
- cancel-in-progress: true
-
-permissions:
- contents: write
-
-jobs:
- delete:
- if: ${{ github.repository == 'pytorch/pytorch' }}
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout repo
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- architecture: x64
- check-latest: false
-
- - name: Delete old branches
- run: python .github/scripts/delete_old_branches.py
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/docker-builds.yml b/.github/workflows/docker-builds.yml
deleted file mode 100644
index 9f0dfe973dc9f..0000000000000
--- a/.github/workflows/docker-builds.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-name: docker-builds
-
-on:
- workflow_dispatch:
- pull_request:
- paths:
- - .ci/docker/**
- - .github/workflows/docker-builds.yml
- - .lintrunner.toml
- push:
- branches:
- - main
- - release/*
- - landchecks/*
- paths:
- - .ci/docker/**
- - .github/workflows/docker-builds.yml
- - .lintrunner.toml
- schedule:
- - cron: 1 3 * * 3
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-env:
- ALPINE_IMAGE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine
- AWS_DEFAULT_REGION: us-east-1
-
-permissions: read-all
-
-jobs:
- docker-build:
- environment: ${{ (github.ref == 'refs/heads/main' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
- timeout-minutes: 240
- strategy:
- fail-fast: false
- matrix:
- runner: [linux.12xlarge]
- docker-image-name: [
- pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9,
- pytorch-linux-focal-cuda12.4-cudnn8-py3-gcc9-inductor-benchmarks,
- pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9,
- pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks,
- pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9,
- pytorch-linux-focal-py3.8-clang10,
- pytorch-linux-focal-py3.11-clang10,
- pytorch-linux-focal-py3.12-clang10,
- pytorch-linux-focal-rocm-n-1-py3,
- pytorch-linux-focal-rocm-n-py3,
- pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12,
- pytorch-linux-focal-py3-clang9-android-ndk-r21e,
- pytorch-linux-jammy-py3.8-gcc11,
- pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks,
- pytorch-linux-jammy-xpu-2024.0-py3,
- pytorch-linux-jammy-py3-clang15-asan,
- pytorch-linux-focal-py3-clang10-onnx,
- pytorch-linux-focal-linter,
- pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter,
- pytorch-linux-jammy-py3-clang12-executorch
- ]
- include:
- - docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11
- runner: linux.arm64.2xlarge
- runs-on: [self-hosted, "${{ matrix.runner }}"]
- env:
- DOCKER_IMAGE_BASE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/${{ matrix.docker-image-name }}
- steps:
- - name: Clean workspace
- shell: bash
- run: |
- echo "${GITHUB_WORKSPACE}"
- sudo rm -rf "${GITHUB_WORKSPACE}"
- mkdir "${GITHUB_WORKSPACE}"
-
- # [see note: pytorch repo ref]
- # deep clone (fetch-depth 0) required for git merge-base
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: Build docker image
- id: build-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: ${{ matrix.docker-image-name }}
- always-rebuild: true
- push: true
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.build-docker-image.outputs.docker-image }}
-
- - uses: nick-fields/retry@3e91a01664abd3c5cd539100d10d33b9c5b68482
- name: Push to https://ghcr.io/
- id: push-to-ghcr-io
- if: ${{ github.event_name == 'push' }}
- env:
- ECR_DOCKER_IMAGE: ${{ steps.build-docker-image.outputs.docker-image }}
- GHCR_PAT: ${{ secrets.GHCR_PAT }}
- IMAGE_NAME: ${{ matrix.docker-image-name }}
- with:
- shell: bash
- timeout_minutes: 15
- max_attempts: 5
- retry_wait_seconds: 90
- command: |
- ghcr_image="ghcr.io/pytorch/ci-image"
- tag=${ECR_DOCKER_IMAGE##*:}
- # Push docker image to the ghcr.io
- echo $GHCR_PAT | docker login ghcr.io -u pytorch --password-stdin
- docker tag "${ECR_DOCKER_IMAGE}" "${ghcr_image}:${IMAGE_NAME}-${tag}"
- docker push "${ghcr_image}:${IMAGE_NAME}-${tag}"
-
- - name: Chown workspace
- uses: ./.github/actions/chown-workspace
- with:
- ALPINE_IMAGE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/${{ (matrix.runner == 'linux.arm64.2xlarge') && 'arm64v8' || 'tool' }}/alpine
- if: always()
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml
deleted file mode 100644
index 9f5221a88f9c8..0000000000000
--- a/.github/workflows/docker-release.yml
+++ /dev/null
@@ -1,151 +0,0 @@
-name: Build Official Docker Images
-
-on:
- workflow_dispatch:
- pull_request:
- paths:
- - Dockerfile
- - docker.Makefile
- - .github/workflows/docker-release.yml
- - .github/scripts/generate_docker_release_matrix.py
- push:
- branches:
- - nightly
- tags:
- # Final Release tags look like: v1.11.0
- - v[0-9]+.[0-9]+.[0-9]+
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - ciflow/nightly/*
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-env:
- BUILD_PROGRESS: plain
- BUILD_TYPE: official
- DOCKER_ORG: pytorch
- DOCKER_REGISTRY: ghcr.io
- NO_BUILD_SUFFIX: true
- USE_BUILDX: 1
- WITH_PUSH: ${{ github.event_name == 'push' && (github.event.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/v')) }}
-
-permissions: read-all
-
-jobs:
- generate-matrix:
- if: github.repository_owner == 'pytorch'
- runs-on: [self-hosted, linux.large]
- outputs:
- matrix: ${{ steps.generate-matrix.outputs.matrix }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: true
- - name: Get docker release matrix
- id: generate-matrix
- run: |
- MATRIX_BLOB="$(python3 .github/scripts/generate_docker_release_matrix.py)"
- echo "${MATRIX_BLOB}"
- echo "matrix=${MATRIX_BLOB}" >> "${GITHUB_OUTPUT}"
-
- build:
- if: ${{ github.repository == 'pytorch/pytorch' }}
- runs-on: [self-hosted, linux.2xlarge]
- environment: ${{ (github.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/v')) && 'docker-build' || '' }}
- timeout-minutes: 240
- needs: generate-matrix
- strategy:
- matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
- fail-fast: false
- env:
- BUILD_IMAGE_TYPE: ${{ matrix.image_type }}
- BUILD_PLATFORMS: ${{ matrix.platform }}
- CUDA_VERSION: ${{ matrix.cuda_full_version }}
- CUDA_VERSION_SHORT: ${{ matrix.cuda }}
- CUDNN_VERSION: ${{ matrix.cudnn_version }}
- steps:
- - name: Setup SSH (Click me for login details)
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # [see note: pytorch repo ref]
- # deep clone (fetch-depth 0) required for git merge-base
- - name: Checkout PyTorch
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
- submodules: 'recursive'
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
- - name: Login to GitHub Container Registry
- if: ${{ env.WITH_PUSH == 'true' }}
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: pytorch
- password: ${{ secrets.GHCR_PAT }}
- # Setup multi-arch image builds
- - name: Set up QEMU
- uses: docker/setup-qemu-action@v2
- env:
- QEMU_BINARY_PATH: ${{ runner.temp }}/bin
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
- with:
- version: v0.10.0
- - name: Setup job specific variables
- run: |
- set -eou pipefail
- # To get QEMU binaries in our PATH
- echo "${RUNNER_TEMP}/bin" >> "${GITHUB_PATH}"
- # Generate PyTorch version to use
- echo "PYTORCH_VERSION=$(python3 .github/scripts/generate_pytorch_version.py --no-build-suffix)" >> "${GITHUB_ENV}"
- - name: Setup test specific variables
- if: ${{ startsWith(github.event.ref, 'refs/tags/v') }}
- run: |
- if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+$ ]]; then
- {
- echo "DOCKER_IMAGE=pytorch-test";
- echo "INSTALL_CHANNEL=pytorch-test";
- echo "TRITON_VERSION=$(cut -f 1 .ci/docker/triton_version.txt)";
- } >> "${GITHUB_ENV}"
- fi
- - name: Setup nightly specific variables
- if: ${{ github.event.ref == 'refs/heads/nightly' || startsWith(github.event.ref, 'refs/tags/ciflow/nightly/') }}
- run: |
- {
- echo "DOCKER_IMAGE=pytorch-nightly";
- echo "INSTALL_CHANNEL=pytorch-nightly";
- echo "TRITON_VERSION=$(cut -f 1 .ci/docker/triton_version.txt)+$(cut -c -10 .ci/docker/ci_commit_pins/triton.txt)";
- } >> "${GITHUB_ENV}"
- - name: Run docker build / push
- # WITH_PUSH is used here to determine whether or not to add the --push flag
- run: |
- make -f docker.Makefile "${BUILD_IMAGE_TYPE}-image"
- - name: Push nightly tags
- if: ${{ github.event.ref == 'refs/heads/nightly' && matrix.image_type == 'runtime' && matrix.build_platforms == 'linux/amd64' }}
- run: |
- PYTORCH_DOCKER_TAG="${PYTORCH_VERSION}-cuda${CUDA_VERSION_SHORT}-cudnn${CUDNN_VERSION}-runtime"
- CUDA_SUFFIX="-cu${CUDA_VERSION}"
- PYTORCH_NIGHTLY_COMMIT=$(docker run ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_DOCKER_TAG}" \
- python -c 'import torch; print(torch.version.git_version[:7],end="")')
-
- docker tag ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_DOCKER_TAG}" \
- ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_NIGHTLY_COMMIT}${CUDA_SUFFIX}"
-
- docker push ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_NIGHTLY_COMMIT}${CUDA_SUFFIX}"
-
- # Please note, here we need to pin a specific version of CUDA for the latest label
- if [[ ${CUDA_VERSION_SHORT} == "12.1" ]]; then
- docker tag ghcr.io/pytorch/pytorch-nightly:"${PYTORCH_NIGHTLY_COMMIT}${CUDA_SUFFIX}" \
- ghcr.io/pytorch/pytorch-nightly:latest
- docker push ghcr.io/pytorch/pytorch-nightly:latest
- fi
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
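
The nightly-tag step of the removed docker-release workflow composes two image tags: the versioned runtime tag produced by `docker.Makefile`, and a commit-suffixed alias so a given nightly can be pulled by its short git commit. A small sketch of how those strings are put together, with placeholder version values:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Placeholder values; in the workflow these come from the build matrix and
# from torch.version.git_version inside the built image.
PYTORCH_VERSION="2.4.0.dev20240520"
CUDA_VERSION_SHORT="12.1"
CUDA_VERSION="12.1.1"
CUDNN_VERSION="8"
PYTORCH_NIGHTLY_COMMIT="abc1234"   # first 7 chars of the nightly commit

PYTORCH_DOCKER_TAG="${PYTORCH_VERSION}-cuda${CUDA_VERSION_SHORT}-cudnn${CUDNN_VERSION}-runtime"
CUDA_SUFFIX="-cu${CUDA_VERSION}"

echo "source tag: ghcr.io/pytorch/pytorch-nightly:${PYTORCH_DOCKER_TAG}"
echo "commit tag: ghcr.io/pytorch/pytorch-nightly:${PYTORCH_NIGHTLY_COMMIT}${CUDA_SUFFIX}"
```
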
diff --git a/.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml b/.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
deleted file mode 100644
index 79a73abda9f76..0000000000000
--- a/.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
+++ /dev/null
@@ -1,353 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-aarch64-binary-manywheel
-
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_wheel/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "arm64v8/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-aarch64-binary-manywheel
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-aarch64-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- manywheel-py3_8-cpu-aarch64-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.8"
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- build_name: manywheel-py3_8-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-aarch64-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cpu-aarch64-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-aarch64-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-cpu-aarch64-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu-aarch64
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-cpu-aarch64-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.9"
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- build_name: manywheel-py3_9-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-aarch64-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-cpu-aarch64-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-aarch64-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-cpu-aarch64-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu-aarch64
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-cpu-aarch64-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.10"
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- build_name: manywheel-py3_10-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-aarch64-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-cpu-aarch64-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-aarch64-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-cpu-aarch64-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu-aarch64
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-cpu-aarch64-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.11"
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- build_name: manywheel-py3_11-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-aarch64-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-cpu-aarch64-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-aarch64-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-cpu-aarch64-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu-aarch64
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-cpu-aarch64-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.12"
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- build_name: manywheel-py3_12-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-aarch64-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-cpu-aarch64-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu-aarch64
- build_environment: linux-aarch64-binary-manywheel
- runs_on: linux.arm64.2xlarge
- ALPINE_IMAGE: "arm64v8/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-aarch64-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-cpu-aarch64-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-aarch64
- DOCKER_IMAGE: pytorch/manylinuxaarch64-builder:cpu-aarch64-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu-aarch64
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-linux-binary-conda-nightly.yml b/.github/workflows/generated-linux-binary-conda-nightly.yml
deleted file mode 100644
index 50a6d986255f7..0000000000000
--- a/.github/workflows/generated-linux-binary-conda-nightly.yml
+++ /dev/null
@@ -1,1278 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-binary-conda
-
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_conda/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-binary-conda
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-binary-conda-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- conda-py3_8-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cpu
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cpu
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_8-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- runs_on: linux.24xlarge
- build_name: conda-py3_8-cuda11_8
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda11_8
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_8-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- runs_on: linux.24xlarge
- build_name: conda-py3_8-cuda12_1
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda12_1
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_8-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.8"
- runs_on: linux.24xlarge
- build_name: conda-py3_8-cuda12_4
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda12_4
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_8-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_9-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cpu
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cpu
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_9-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.9"
- runs_on: linux.24xlarge
- build_name: conda-py3_9-cuda11_8
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda11_8
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_9-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.9"
- runs_on: linux.24xlarge
- build_name: conda-py3_9-cuda12_1
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda12_1
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_9-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.9"
- runs_on: linux.24xlarge
- build_name: conda-py3_9-cuda12_4
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda12_4
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_9-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_10-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cpu
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cpu
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_10-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.10"
- runs_on: linux.24xlarge
- build_name: conda-py3_10-cuda11_8
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda11_8
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_10-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.10"
- runs_on: linux.24xlarge
- build_name: conda-py3_10-cuda12_1
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda12_1
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_10-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.10"
- runs_on: linux.24xlarge
- build_name: conda-py3_10-cuda12_4
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda12_4
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_10-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_11-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cpu
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cpu
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_11-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.11"
- runs_on: linux.24xlarge
- build_name: conda-py3_11-cuda11_8
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda11_8
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_11-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.11"
- runs_on: linux.24xlarge
- build_name: conda-py3_11-cuda12_1
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda12_1
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_11-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.11"
- runs_on: linux.24xlarge
- build_name: conda-py3_11-cuda12_4
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda12_4
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_11-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_12-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cpu
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cpu
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_12-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.12"
- runs_on: linux.24xlarge
- build_name: conda-py3_12-cuda11_8
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda11_8
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda11.8-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_12-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.12"
- runs_on: linux.24xlarge
- build_name: conda-py3_12-cuda12_1
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda12_1
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.1-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- conda-py3_12-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.12"
- runs_on: linux.24xlarge
- build_name: conda-py3_12-cuda12_4
- build_environment: linux-binary-conda
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda12_4
- build_environment: linux-binary-conda
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-py3_12-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/conda-builder:cuda12.4-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml
deleted file mode 100644
index 5577a5e7d9c3a..0000000000000
--- a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-binary-libtorch-cxx11-abi
-
-
-on:
- push:
- branches:
- - main
- tags:
- - 'ciflow/trunk/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-binary-libtorch-cxx11-abi
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cpu-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cpu-shared-with-deps-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cpu-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
deleted file mode 100644
index d400e82249867..0000000000000
--- a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
+++ /dev/null
@@ -1,507 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-binary-libtorch-cxx11-abi
-
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_libtorch/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-binary-libtorch-cxx11-abi
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cpu-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cpu-shared-with-deps-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cpu-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cpu-shared-with-deps-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cpu-shared-with-deps-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cpu-shared-with-deps-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-cuda11_8-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda11_8-shared-with-deps-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda11_8-shared-with-deps-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda11_8-shared-with-deps-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda11_8-shared-with-deps-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda11.8-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda11_8-shared-with-deps-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-cuda12_1-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda12_1-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_1-shared-with-deps-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_1-shared-with-deps-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda12_1-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_1-shared-with-deps-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_1-shared-with-deps-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda12_1-shared-with-deps-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-cuda12_4-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda12_4-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_4-shared-with-deps-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_4-shared-with-deps-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda12_4-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_4-shared-with-deps-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_4-shared-with-deps-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cuda12.4-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cuda12_4-shared-with-deps-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-rocm6_0-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm6_0-shared-with-deps-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm6_0-shared-with-deps-cxx11-abi-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/libtorch-cxx11-builder:rocm6.0-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- libtorch-rocm6_0-shared-with-deps-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-rocm6_0-shared-with-deps-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-rocm6_1-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm6_1-shared-with-deps-cxx11-abi
- build_environment: linux-binary-libtorch-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm6_1-shared-with-deps-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm6_1-shared-with-deps-cxx11-abi-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-rocm6_1-shared-with-deps-cxx11-abi
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/libtorch-cxx11-builder:rocm6.1-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- libtorch-rocm6_1-shared-with-deps-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-rocm6_1-shared-with-deps-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm6_1-shared-with-deps-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml
deleted file mode 100644
index 0158860d6f942..0000000000000
--- a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml
+++ /dev/null
@@ -1,72 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-binary-libtorch-pre-cxx11
-
-
-on:
- push:
- branches:
- - main
- tags:
- - 'ciflow/trunk/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-binary-libtorch-pre-cxx11
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-binary-libtorch-pre-cxx11-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-pre-cxx11-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cpu-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cpu-shared-with-deps-pre-cxx11-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-pre-cxx11-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cpu-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
deleted file mode 100644
index 3205c3c78dad4..0000000000000
--- a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
+++ /dev/null
@@ -1,507 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-binary-libtorch-pre-cxx11
-
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_libtorch/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-binary-libtorch-pre-cxx11
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-binary-libtorch-pre-cxx11-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-pre-cxx11-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cpu-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cpu-shared-with-deps-pre-cxx11-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-pre-cxx11-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cpu-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cpu-shared-with-deps-pre-cxx11-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cpu-shared-with-deps-pre-cxx11-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cpu-shared-with-deps-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-cuda11_8-shared-with-deps-pre-cxx11-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda11_8-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda11_8-shared-with-deps-pre-cxx11-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda11_8-shared-with-deps-pre-cxx11-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda11_8-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda11_8-shared-with-deps-pre-cxx11-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda11_8-shared-with-deps-pre-cxx11-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda11_8-shared-with-deps-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-cuda12_1-shared-with-deps-pre-cxx11-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda12_1-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_1-shared-with-deps-pre-cxx11-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_1-shared-with-deps-pre-cxx11-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda12_1-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_1-shared-with-deps-pre-cxx11-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_1-shared-with-deps-pre-cxx11-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda12_1-shared-with-deps-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-cuda12_4-shared-with-deps-pre-cxx11-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda12_4-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_4-shared-with-deps-pre-cxx11-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_4-shared-with-deps-pre-cxx11-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda12_4-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-cuda12_4-shared-with-deps-pre-cxx11-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_4-shared-with-deps-pre-cxx11-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-cuda12_4-shared-with-deps-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-rocm6_0-shared-with-deps-pre-cxx11-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm6_0-shared-with-deps-pre-cxx11-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm6_0-shared-with-deps-pre-cxx11-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- libtorch-rocm6_0-shared-with-deps-pre-cxx11-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-rocm6_0-shared-with-deps-pre-cxx11-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- libtorch-rocm6_1-shared-with-deps-pre-cxx11-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm6_1-shared-with-deps-pre-cxx11
- build_environment: linux-binary-libtorch-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm6_1-shared-with-deps-pre-cxx11-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm6_1-shared-with-deps-pre-cxx11-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-rocm6_1-shared-with-deps-pre-cxx11
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.1-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- libtorch-rocm6_1-shared-with-deps-pre-cxx11-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-rocm6_1-shared-with-deps-pre-cxx11-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm6_1-shared-with-deps-pre-cxx11
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-linux-binary-manywheel-main.yml b/.github/workflows/generated-linux-binary-manywheel-main.yml
deleted file mode 100644
index 4764ede6bcb2c..0000000000000
--- a/.github/workflows/generated-linux-binary-manywheel-main.yml
+++ /dev/null
@@ -1,113 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-binary-manywheel
-
-
-on:
- push:
- branches:
- - main
- tags:
- - 'ciflow/trunk/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-binary-manywheel
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- manywheel-py3_8-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda11_8
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda11_8
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- manywheel-py3_8-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_1
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_1
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/generated-linux-binary-manywheel-nightly.yml b/.github/workflows/generated-linux-binary-manywheel-nightly.yml
deleted file mode 100644
index 8ad43b4c36607..0000000000000
--- a/.github/workflows/generated-linux-binary-manywheel-nightly.yml
+++ /dev/null
@@ -1,2618 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-binary-manywheel
-
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_wheel/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-binary-manywheel
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- manywheel-py3_8-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_8-cpu-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cpu-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-cpu-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_8-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda11_8
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda11_8
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_8-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_1
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_1
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_8-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_4
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_4
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_8-rocm6_0-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm6_0
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-rocm6_0-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-rocm6_0-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_8-rocm6_0
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_8-rocm6_0-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-rocm6_0-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm6_0
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_8-rocm6_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm6_1
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-rocm6_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-rocm6_1-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_8-rocm6_1
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.1-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_8-rocm6_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-rocm6_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm6_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-cpu-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-cpu-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-cpu-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda11_8
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda11_8
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda12_1
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda12_1
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda12_4
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda12_4
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-rocm6_0-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm6_0
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-rocm6_0-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-rocm6_0-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_9-rocm6_0
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_9-rocm6_0-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-rocm6_0-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm6_0
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-rocm6_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm6_1
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-rocm6_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-rocm6_1-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_9-rocm6_1
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.1-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_9-rocm6_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-rocm6_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm6_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-cpu-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-cpu-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-cpu-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda11_8
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda11_8
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda12_1
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda12_1
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda12_4
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda12_4
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-rocm6_0-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm6_0
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-rocm6_0-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-rocm6_0-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_10-rocm6_0
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_10-rocm6_0-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-rocm6_0-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm6_0
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-rocm6_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm6_1
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-rocm6_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-rocm6_1-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_10-rocm6_1
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.1-main
- - name: Test Pytorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_10-rocm6_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-rocm6_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm6_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-cpu-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-cpu-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-cpu-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda11_8
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda11_8
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda12_1
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda12_1
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda12_4
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda12_4
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-rocm6_0-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm6_0
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-rocm6_0-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-rocm6_0-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_11-rocm6_0
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
- - name: Test PyTorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_11-rocm6_0-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-rocm6_0-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm6_0
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-rocm6_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm6_1
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-rocm6_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-rocm6_1-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_11-rocm6_1
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.1-main
- - name: Test PyTorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_11-rocm6_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-rocm6_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm6_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-cpu-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-cpu-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-cpu-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-cxx11-abi-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-cpu-cxx11-abi-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu-cxx11-abi
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-cpu-cxx11-abi-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu-cxx11-abi
- GPU_ARCH_TYPE: cpu-cxx11-abi
- DOCKER_IMAGE: pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-main
- DESIRED_DEVTOOLSET: cxx11-abi
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu-cxx11-abi
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda11_8
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==8.7.0.84; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-cuda11_8-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda11_8
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-cuda11_8-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda12_1
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-cuda12_1-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda12_1
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-cuda12_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda12_4
- build_environment: linux-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.7.29; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-cuda12_4-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda12_4
- build_environment: linux-binary-manywheel
- runs_on: linux.4xlarge.nvidia.gpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-cuda12_4-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-rocm6_0-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm6_0
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-rocm6_0-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-rocm6_0-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_12-rocm6_0
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
- - name: Test PyTorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_12-rocm6_0-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-rocm6_0-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm6_0
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-rocm6_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm6_1
- build_environment: linux-binary-manywheel
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-rocm6_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-rocm6_1-build
- runs-on: linux.rocm.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Setup ROCm
- uses: ./.github/actions/setup-rocm
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: manywheel-py3_12-rocm6_1
- path: "${{ runner.temp }}/artifacts/"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: ROCm set GPU_FLAG
- run: |
- echo "GPU_FLAG=--device=/dev/mem --device=/dev/kfd --device=/dev/dri --group-add video --group-add daemon" >> "${GITHUB_ENV}"
- - name: Pull Docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: pytorch/manylinux-builder:rocm6.1-main
- - name: Test PyTorch binary
- uses: ./pytorch/.github/actions/test-pytorch-binary
- - name: Teardown ROCm
- uses: ./.github/actions/teardown-rocm
- manywheel-py3_12-rocm6_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-rocm6_1-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.1
- GPU_ARCH_VERSION: 6.1
- GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm6_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml b/.github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml
deleted file mode 100644
index 4f0569c253f23..0000000000000
--- a/.github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml
+++ /dev/null
@@ -1,353 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/linux_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: linux-s390x-binary-manywheel
-
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_wheel/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BINARY_ENV_FILE: /tmp/env
- BUILD_ENVIRONMENT: linux-s390x-binary-manywheel
- BUILDER_ROOT: /builder
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- PYTORCH_FINAL_PACKAGE_DIR: /artifacts
- PYTORCH_ROOT: /pytorch
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: linux-s390x-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- manywheel-py3_8-cpu-s390x-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.8"
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- build_name: manywheel-py3_8-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-s390x-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-cpu-s390x-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-cpu-s390x-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_8-cpu-s390x-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-cpu-s390x
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_9-cpu-s390x-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.9"
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- build_name: manywheel-py3_9-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-s390x-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-cpu-s390x-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-cpu-s390x-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_9-cpu-s390x-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-cpu-s390x
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_10-cpu-s390x-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.10"
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- build_name: manywheel-py3_10-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-s390x-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-cpu-s390x-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-cpu-s390x-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_10-cpu-s390x-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-cpu-s390x
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_11-cpu-s390x-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.11"
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- build_name: manywheel-py3_11-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-s390x-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-cpu-s390x-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-cpu-s390x-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_11-cpu-s390x-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-cpu-s390x
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
-
- manywheel-py3_12-cpu-s390x-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- uses: ./.github/workflows/_binary-build-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.12"
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- build_name: manywheel-py3_12-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-s390x-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-cpu-s390x-build
- uses: ./.github/workflows/_binary-test-linux.yml
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu-s390x
- build_environment: linux-s390x-binary-manywheel
- runs_on: linux.s390x
- ALPINE_IMAGE: "docker.io/s390x/alpine"
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-cpu-s390x-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: manywheel-py3_12-cpu-s390x-test
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: manywheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu-s390x
- DOCKER_IMAGE: pytorch/manylinuxs390x-builder:cpu-s390x-main
- DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-cpu-s390x
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml b/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml
deleted file mode 100644
index a8cbdb7cd6feb..0000000000000
--- a/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml
+++ /dev/null
@@ -1,624 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: macos-arm64-binary-conda
-
-on:
-# TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_conda/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: macos-arm64-binary-conda
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: macos-arm64-binary-conda-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- conda-py3_8-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda alters PATH in a way that breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_8-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- conda-py3_8-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_9-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_9-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- conda-py3_9-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_10-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_10-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- conda-py3_10-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_11-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_11-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- conda-py3_11-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_12-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_12-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- conda-py3_12-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/conda-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml b/.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml
deleted file mode 100644
index 0ed7ba10a07d5..0000000000000
--- a/.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml
+++ /dev/null
@@ -1,157 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: macos-arm64-binary-libtorch-cxx11-abi
-
-on:
-# TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_libtorch/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: macos-arm64-binary-libtorch-cxx11-abi
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: macos-arm64-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-cxx11-abi-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- # This is a dummy value so that libtorch works correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cpu-shared-with-deps-cxx11-abi
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- libtorch-cpu-shared-with-deps-cxx11-abi-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cpu-shared-with-deps-cxx11-abi-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:cpu-main
- LIBTORCH_VARIANT: shared-with-deps
- DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-cpu-shared-with-deps-cxx11-abi
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml b/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml
deleted file mode 100644
index 167161de3645c..0000000000000
--- a/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml
+++ /dev/null
@@ -1,629 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/macos_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: macos-arm64-binary-wheel
-
-on:
-# TODO: Migrate to new ciflow trigger, reference https://github.com/pytorch/pytorch/pull/70321
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_wheel/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: macos-arm64-binary-wheel
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SKIP_ALL_TESTS: 0
-concurrency:
- group: macos-arm64-binary-wheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- wheel-py3_8-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_8-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- wheel-py3_8-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_8-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.8"
- build_name: wheel-py3_8-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_9-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_9-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- wheel-py3_9-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_9-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.9"
- build_name: wheel-py3_9-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_10-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_10-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- wheel-py3_10-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_10-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.10"
- build_name: wheel-py3_10-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_11-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_11-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- wheel-py3_11-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_11-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.11"
- build_name: wheel-py3_11-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_12-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: macos-13-xlarge
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- # For sccache access (only on non-forked PRs)
- AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
- steps:
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- # shellcheck disable=SC2129
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- # shellcheck disable=SC2129
- echo "MAC_PACKAGE_WORK_DIR=${RUNNER_TEMP}" >> "${GITHUB_ENV}"
- - name: Install conda and dependencies
- run: |
- # Install conda manually; setup-miniconda messes with the PATH, which breaks the ruby steps we run later on
- curl --retry 3 --retry-all-errors -o "${RUNNER_TEMP}/conda.sh" "https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-MacOSX-$(uname -m).sh"
- chmod +x "${RUNNER_TEMP}/conda.sh"
- /bin/bash "${RUNNER_TEMP}/conda.sh" -b -p "${RUNNER_TEMP}/anaconda"
- echo "${RUNNER_TEMP}/anaconda/bin" >> "${GITHUB_PATH}"
- if [ -d "/Applications/Xcode_14.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_14.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- elif [ -d "/Applications/Xcode_13.3.1.app" ]; then
- echo "DEVELOPER_DIR=/Applications/Xcode_13.3.1.app/Contents/Developer" >> "${GITHUB_ENV}"
- fi
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Install sccache (only for non-forked PRs, and pushes to trunk)
- uses: nick-fields/retry@v2.8.2
- if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
- with:
- timeout_minutes: 5
- max_attempts: 3
- retry_wait_seconds: 90
- command: |
- sudo curl --retry 3 --retry-all-errors https://s3.amazonaws.com/ossci-macos/sccache_v2.15 --output /usr/local/bin/sccache
- sudo chmod +x /usr/local/bin/sccache
- echo "SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> "${GITHUB_ENV}"
- - name: Populate binary env
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- run: |
- # shellcheck disable=SC1091
- source "${RUNNER_TEMP}/anaconda/bin/activate"
- "${PYTORCH_ROOT}/.circleci/scripts/binary_macos_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_12-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- wheel-py3_12-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_12-cpu-build
- with:
- PYTORCH_ROOT: /pytorch
- BUILDER_ROOT: /builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DOCKER_IMAGE: pytorch/manylinux-builder:cpu-main
- DESIRED_PYTHON: "3.12"
- build_name: wheel-py3_12-cpu
- use_s3: False
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-windows-binary-conda-nightly.yml b/.github/workflows/generated-windows-binary-conda-nightly.yml
deleted file mode 100644
index c3e4a038896e7..0000000000000
--- a/.github/workflows/generated-windows-binary-conda-nightly.yml
+++ /dev/null
@@ -1,4919 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: windows-binary-conda
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_conda/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: windows-binary-conda
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
-concurrency:
- group: windows-binary-conda-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- conda-py3_8-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_8-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
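A note on the two "Populate binary env" steps in the build job above: key=value lines appended to the file named by ${GITHUB_ENV} become environment variables for every later step in the same job, which is how the PYTORCH_FINAL_PACKAGE_DIR written there ends up readable as ${{ env.PYTORCH_FINAL_PACKAGE_DIR }} in the upload-artifact step. Setting these in the workflow-level env block would not work because, as the generated comment notes, the runner.temp value is only available inside a job. A minimal sketch of the mechanism, assuming a generic hosted runner and made-up paths (not part of the deleted workflow):

jobs:
  demo:
    runs-on: windows-latest
    steps:
      - name: Export a value for later steps
        shell: bash
        run: echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
      - name: Consume it in a later step
        shell: bash
        run: echo "packages will land in ${PYTORCH_FINAL_PACKAGE_DIR}"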
- conda-py3_8-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_8-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_8-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
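The *-upload jobs in this workflow contain no steps of their own; each one calls the shared reusable workflow ./.github/workflows/_binary-upload.yml, with the with: block mapping onto inputs declared by that workflow and the secrets: block onto its declared secrets. The callee is not part of this hunk, so the following is only a rough sketch of the interface shape it would need to accept these calls; the input and secret names are copied from the caller above, while the types, defaults, and job body are assumptions:

name: binary-upload
on:
  workflow_call:
    inputs:
      build_name:
        required: true
        type: string
      PACKAGE_TYPE:
        required: true
        type: string
      DESIRED_CUDA:
        required: false
        type: string
    secrets:
      github-token:
        required: true
      conda-pytorchbot-token:
        required: true
jobs:
  upload:
    runs-on: ubuntu-latest
    steps:
      - name: Upload the named build
        run: echo "would upload ${{ inputs.build_name }} (package type ${{ inputs.PACKAGE_TYPE }})"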
- conda-py3_8-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_8-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_8-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_8-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_8-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_8-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_8-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_8-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_8-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_8-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_8-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_8-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_8-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_8-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_8-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_8-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_8-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.8"
- build_name: conda-py3_8-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
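The recurring TODO about DESIRED_CUDA being a legacy duplicate of GPU_ARCH_VERSION can be read off the values used above: CPU variants keep the literal "cpu", while CUDA variants are just the version with the dot stripped and a "cu" prefix (11.8 becomes cu118, 12.1 becomes cu121, 12.4 becomes cu124). A hypothetical step showing that derivation; no such step exists in the deleted workflow and the hard-coded version is only for illustration:

      - name: Derive legacy DESIRED_CUDA from GPU_ARCH_VERSION (hypothetical)
        shell: bash
        env:
          GPU_ARCH_VERSION: "12.1"
        run: |
          # strip the dot and add the "cu" prefix
          DESIRED_CUDA="cu${GPU_ARCH_VERSION//./}"
          echo "DESIRED_CUDA=${DESIRED_CUDA}" >> "${GITHUB_ENV}"  # -> cu121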
- conda-py3_9-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_9-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_9-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_9-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_9-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_9-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_9-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
-      # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
-      # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_9-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_9-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_9-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_9-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_9-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_9-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_9-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_9-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.9"
- build_name: conda-py3_9-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_10-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_10-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_10-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_10-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_10-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_10-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_10-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_10-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_10-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_10-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_10-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_10-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_10-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_10-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_10-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.10"
- build_name: conda-py3_10-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_11-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_11-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_11-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_11-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_11-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_11-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_11-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_11-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_11-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_11-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_11-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_11-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_11-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_11-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_11-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.11"
- build_name: conda-py3_11-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_12-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_12-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_12-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_12-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_12-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_12-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_12-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_12-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_12-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- conda-py3_12-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: conda-py3_12-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: conda-py3_12-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: conda-py3_12-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- conda-py3_12-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: conda-py3_12-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: conda
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.12"
- build_name: conda-py3_12-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-windows-binary-libtorch-debug-main.yml b/.github/workflows/generated-windows-binary-libtorch-debug-main.yml
deleted file mode 100644
index 8ac413be0d65e..0000000000000
--- a/.github/workflows/generated-windows-binary-libtorch-debug-main.yml
+++ /dev/null
@@ -1,256 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: windows-binary-libtorch-debug
-
-on:
- push:
- branches:
- - main
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: windows-binary-libtorch-debug
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
-concurrency:
- group: windows-binary-libtorch-debug-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-debug-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cpu-shared-with-deps-debug
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cpu-shared-with-deps-debug-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-debug-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cpu-shared-with-deps-debug
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
diff --git a/.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml b/.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml
deleted file mode 100644
index 60ba59556926f..0000000000000
--- a/.github/workflows/generated-windows-binary-libtorch-debug-nightly.yml
+++ /dev/null
@@ -1,1059 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: windows-binary-libtorch-debug
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_libtorch/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: windows-binary-libtorch-debug
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
-concurrency:
- group: windows-binary-libtorch-debug-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-debug-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cpu-shared-with-deps-debug
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cpu-shared-with-deps-debug-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-debug-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cpu-shared-with-deps-debug
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cpu-shared-with-deps-debug-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cpu-shared-with-deps-debug-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cpu-shared-with-deps-debug
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- libtorch-cuda11_8-shared-with-deps-debug-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cuda11_8-shared-with-deps-debug
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda11_8-shared-with-deps-debug-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda11_8-shared-with-deps-debug-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cuda11_8-shared-with-deps-debug
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda11_8-shared-with-deps-debug-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda11_8-shared-with-deps-debug-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cuda11_8-shared-with-deps-debug
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- libtorch-cuda12_1-shared-with-deps-debug-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cuda12_1-shared-with-deps-debug
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_1-shared-with-deps-debug-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_1-shared-with-deps-debug-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cuda12_1-shared-with-deps-debug
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_1-shared-with-deps-debug-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_1-shared-with-deps-debug-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cuda12_1-shared-with-deps-debug
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- libtorch-cuda12_4-shared-with-deps-debug-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cuda12_4-shared-with-deps-debug
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_4-shared-with-deps-debug-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_4-shared-with-deps-debug-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts;
- # without this value, pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cuda12_4-shared-with-deps-debug
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_4-shared-with-deps-debug-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_4-shared-with-deps-debug-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- LIBTORCH_CONFIG: debug
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cuda12_4-shared-with-deps-debug
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-windows-binary-libtorch-release-main.yml b/.github/workflows/generated-windows-binary-libtorch-release-main.yml
deleted file mode 100644
index ab00cdc8919ea..0000000000000
--- a/.github/workflows/generated-windows-binary-libtorch-release-main.yml
+++ /dev/null
@@ -1,256 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: windows-binary-libtorch-release
-
-on:
- push:
- branches:
- - main
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: windows-binary-libtorch-release
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
-concurrency:
- group: windows-binary-libtorch-release-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-release-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cpu-shared-with-deps-release
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cpu-shared-with-deps-release-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-release-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cpu-shared-with-deps-release
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
diff --git a/.github/workflows/generated-windows-binary-libtorch-release-nightly.yml b/.github/workflows/generated-windows-binary-libtorch-release-nightly.yml
deleted file mode 100644
index 842de97a1fbe9..0000000000000
--- a/.github/workflows/generated-windows-binary-libtorch-release-nightly.yml
+++ /dev/null
@@ -1,1059 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: windows-binary-libtorch-release
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_libtorch/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: windows-binary-libtorch-release
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
-concurrency:
- group: windows-binary-libtorch-release-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- libtorch-cpu-shared-with-deps-release-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cpu-shared-with-deps-release
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cpu-shared-with-deps-release-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cpu-shared-with-deps-release-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cpu-shared-with-deps-release
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cpu-shared-with-deps-release-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cpu-shared-with-deps-release-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cpu-shared-with-deps-release
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- libtorch-cuda11_8-shared-with-deps-release-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cuda11_8-shared-with-deps-release
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda11_8-shared-with-deps-release-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda11_8-shared-with-deps-release-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cuda11_8-shared-with-deps-release
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda11_8-shared-with-deps-release-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda11_8-shared-with-deps-release-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cuda11_8-shared-with-deps-release
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- libtorch-cuda12_1-shared-with-deps-release-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cuda12_1-shared-with-deps-release
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_1-shared-with-deps-release-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_1-shared-with-deps-release-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cuda12_1-shared-with-deps-release
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_1-shared-with-deps-release-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_1-shared-with-deps-release-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cuda12_1-shared-with-deps-release
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- libtorch-cuda12_4-shared-with-deps-release-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: libtorch-cuda12_4-shared-with-deps-release
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_4-shared-with-deps-release-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-cuda12_4-shared-with-deps-release-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: libtorch-cuda12_4-shared-with-deps-release
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- libtorch-cuda12_4-shared-with-deps-release-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: libtorch-cuda12_4-shared-with-deps-release-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: libtorch
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- LIBTORCH_CONFIG: release
- LIBTORCH_VARIANT: shared-with-deps
- # This is a dummy value for libtorch to work correctly with our batch scripts
- # without this value pip does not get installed for some reason
- DESIRED_PYTHON: "3.8"
- build_name: libtorch-cuda12_4-shared-with-deps-release
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/generated-windows-binary-wheel-nightly.yml b/.github/workflows/generated-windows-binary-wheel-nightly.yml
deleted file mode 100644
index d64c221e7895f..0000000000000
--- a/.github/workflows/generated-windows-binary-wheel-nightly.yml
+++ /dev/null
@@ -1,4939 +0,0 @@
-# @generated DO NOT EDIT MANUALLY
-
-# Template is at: .github/templates/windows_binary_build_workflow.yml.j2
-# Generation script: .github/scripts/generate_ci_workflows.py
-name: windows-binary-wheel
-
-on:
- push:
- # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build
- branches:
- - nightly
- tags:
- # NOTE: Binary build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - 'ciflow/binaries/*'
- - 'ciflow/binaries_wheel/*'
- workflow_dispatch:
-
-env:
- # Needed for conda builds
- ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine"
- ANACONDA_USER: pytorch
- AWS_DEFAULT_REGION: us-east-1
- BUILD_ENVIRONMENT: windows-binary-wheel
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
-concurrency:
- group: windows-binary-wheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- wheel-py3_8-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_8-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_8-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_8-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_8-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.8"
- build_name: wheel-py3_8-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_8-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_8-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_8-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_8-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_8-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.8"
- build_name: wheel-py3_8-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_8-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_8-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_8-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_8-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_8-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.8"
- build_name: wheel-py3_8-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_8-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_8-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_8-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.8"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_8-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_8-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_8-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.8"
- build_name: wheel-py3_8-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_9-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_9-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_9-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_9-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_9-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.9"
- build_name: wheel-py3_9-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_9-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_9-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_9-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_9-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_9-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.9"
- build_name: wheel-py3_9-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_9-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_9-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_9-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_9-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_9-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.9"
- build_name: wheel-py3_9-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_9-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_9-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_9-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.9"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_9-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_9-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_9-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.9"
- build_name: wheel-py3_9-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_10-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_10-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_10-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_10-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_10-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.10"
- build_name: wheel-py3_10-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_10-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_10-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_10-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_10-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_10-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.10"
- build_name: wheel-py3_10-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_10-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_10-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_10-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_10-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_10-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.10"
- build_name: wheel-py3_10-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_10-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_10-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_10-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.10"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_10-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_10-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_10-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.10"
- build_name: wheel-py3_10-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_11-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_11-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_11-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_11-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_11-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.11"
- build_name: wheel-py3_11-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_11-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_11-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_11-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_11-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_11-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.11"
- build_name: wheel-py3_11-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_11-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_11-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_11-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_11-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_11-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.11"
- build_name: wheel-py3_11-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_11-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_11-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_11-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.11"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_11-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_11-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_11-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.11"
- build_name: wheel-py3_11-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_12-cpu-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_12-cpu
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cpu-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_12-cpu-build
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_12-cpu
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cpu-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_12-cpu-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cpu
- GPU_ARCH_TYPE: cpu
- DESIRED_PYTHON: "3.12"
- build_name: wheel-py3_12-cpu
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_12-cuda11_8-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_12-cuda11_8
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cuda11_8-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_12-cuda11_8-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_12-cuda11_8
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cuda11_8-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_12-cuda11_8-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu118
- GPU_ARCH_VERSION: 11.8
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.12"
- build_name: wheel-py3_12-cuda11_8
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_12-cuda12_1-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_12-cuda12_1
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cuda12_1-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_12-cuda12_1-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_12-cuda12_1
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cuda12_1-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_12-cuda12_1-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu121
- GPU_ARCH_VERSION: 12.1
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.12"
- build_name: wheel-py3_12-cuda12_1
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
- wheel-py3_12-cuda12_4-build:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: windows.4xlarge.nonephemeral
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==8.9.2.26; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Build PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_build.sh"
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: wheel-py3_12-cuda12_4
- retention-days: 14
- if-no-files-found: error
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cuda12_4-test: # Testing
- if: ${{ github.repository_owner == 'pytorch' }}
- needs: wheel-py3_12-cuda12_4-build
- runs-on: windows.8xlarge.nvidia.gpu
- timeout-minutes: 240
- env:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- SKIP_ALL_TESTS: 1
- DESIRED_PYTHON: "3.12"
- steps:
- - name: Display EC2 information
- shell: bash
- run: |
- set -euo pipefail
- function get_ec2_metadata() {
- # Pulled from instance metadata endpoint for EC2
- # see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
- category=$1
- curl -fsSL "http://169.254.169.254/latest/meta-data/${category}"
- }
- echo "ami-id: $(get_ec2_metadata ami-id)"
- echo "instance-id: $(get_ec2_metadata instance-id)"
- echo "instance-type: $(get_ec2_metadata instance-type)"
- echo "system info $(uname -a)"
- - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
- uses: pytorch/test-infra/.github/actions/setup-ssh@main
- continue-on-error: true
- with:
- github-secret: ${{ secrets.GITHUB_TOKEN }}
- # Needed for binary builds, see: https://github.com/pytorch/pytorch/issues/73339#issuecomment-1058981560
- - name: Enable long paths on Windows
- shell: powershell
- run: |
- Set-ItemProperty -Path "HKLM:\\SYSTEM\CurrentControlSet\Control\FileSystem" -Name "LongPathsEnabled" -Value 1
- # Since it's just a defensive command, the workflow should continue even if the command fails. This step can be
- # removed once Windows Defender is removed from the AMI
- - name: Disables Windows Defender scheduled and real-time scanning for files in directories used by PyTorch
- continue-on-error: true
- shell: powershell
- run: |
- Add-MpPreference -ExclusionPath $(Get-Location).tostring(),$Env:TEMP -ErrorAction Ignore
- # Let's both exclude the path and disable Windows Defender completely just to be sure
- # that it doesn't interfere
- Set-MpPreference -DisableRealtimeMonitoring $True -ErrorAction Ignore
- # NOTE: These environment variables are put here so that they can be applied on every job equally
- # They are also here because setting them at a workflow level doesn't give us access to the
- # runner.temp variable, which we need.
- - name: Populate binary env
- shell: bash
- run: |
- echo "BINARY_ENV_FILE=${RUNNER_TEMP}/env" >> "${GITHUB_ENV}"
- echo "PYTORCH_FINAL_PACKAGE_DIR=${RUNNER_TEMP}/artifacts" >> "${GITHUB_ENV}"
- echo "WIN_PACKAGE_WORK_DIR=${RUNNER_TEMP}"
- - uses: actions/download-artifact@v3
- name: Download Build Artifacts
- with:
- name: wheel-py3_12-cuda12_4
- path: "${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
- - name: Checkout PyTorch
- uses: malfet/checkout@silent-checkout
- with:
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- submodules: recursive
- path: pytorch
- quiet-checkout: true
- - name: Clean PyTorch checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: pytorch
- - name: Checkout pytorch/builder
- uses: malfet/checkout@silent-checkout
- with:
- ref: main
- submodules: recursive
- repository: pytorch/builder
- path: builder
- quiet-checkout: true
- - name: Clean pytorch/builder checkout
- run: |
- # Remove any artifacts from the previous checkouts
- git clean -fxd
- working-directory: builder
- - name: Populate binary env
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_populate_env.sh"
- - name: Test PyTorch binary
- shell: bash
- run: |
- "${PYTORCH_ROOT}/.circleci/scripts/binary_windows_test.sh"
- - name: Wait until all sessions have drained
- shell: powershell
- working-directory: pytorch
- if: always()
- timeout-minutes: 120
- run: |
- .github\scripts\wait_for_ssh_to_drain.ps1
- - name: Kill active ssh sessions if still around (Useful if workflow was cancelled)
- shell: powershell
- working-directory: pytorch
- if: always()
- run: |
- .github\scripts\kill_active_ssh_sessions.ps1
- wheel-py3_12-cuda12_4-upload: # Uploading
- if: ${{ github.repository_owner == 'pytorch' }}
- permissions:
- id-token: write
- contents: read
- needs: wheel-py3_12-cuda12_4-test
- with:
- PYTORCH_ROOT: ${{ github.workspace }}/pytorch
- BUILDER_ROOT: ${{ github.workspace }}/builder
- PACKAGE_TYPE: wheel
- # TODO: This is a legacy variable that we eventually want to get rid of in
- # favor of GPU_ARCH_VERSION
- DESIRED_CUDA: cu124
- GPU_ARCH_VERSION: 12.4
- GPU_ARCH_TYPE: cuda
- DESIRED_PYTHON: "3.12"
- build_name: wheel-py3_12-cuda12_4
- secrets:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
- conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
- uses: ./.github/workflows/_binary-upload.yml
diff --git a/.github/workflows/inductor-micro-benchmark.yml b/.github/workflows/inductor-micro-benchmark.yml
deleted file mode 100644
index 4fe0ddf50ef2a..0000000000000
--- a/.github/workflows/inductor-micro-benchmark.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: inductor-micro-benchmark
-
-on:
- schedule:
- - cron: 0 7 * * *
- push:
- tags:
- - ciflow/inductor-micro-benchmark/*
- workflow_dispatch:
-
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- linux-focal-cuda12_1-py3_10-gcc9-inductor-micro-benchmark-build:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.0'
- test-matrix: |
- { include: [
- { config: "inductor-micro-benchmark", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
- ]}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-micro-benchmark-test:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-micro-benchmark-build
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-micro-benchmark-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-micro-benchmark-build.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- timeout-minutes: 720
diff --git a/.github/workflows/inductor-perf-compare.yml b/.github/workflows/inductor-perf-compare.yml
deleted file mode 100644
index e485a8bfce1b7..0000000000000
--- a/.github/workflows/inductor-perf-compare.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: inductor-A100-perf-compare
-
-on:
- push:
- tags:
- - ciflow/inductor-perf-compare/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- linux-focal-cuda12_1-py3_10-gcc9-inductor-build:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.0'
- test-matrix: |
- { include: [
- { config: "inductor_huggingface_perf_compare", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
- { config: "inductor_timm_perf_compare", shard: 1, num_shards: 2, runner: "linux.gcp.a100" },
- { config: "inductor_timm_perf_compare", shard: 2, num_shards: 2, runner: "linux.gcp.a100" },
- { config: "inductor_torchbench_perf_compare", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
- ]}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-test:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
diff --git a/.github/workflows/inductor-perf-test-nightly.yml b/.github/workflows/inductor-perf-test-nightly.yml
deleted file mode 100644
index e77c915749f3f..0000000000000
--- a/.github/workflows/inductor-perf-test-nightly.yml
+++ /dev/null
@@ -1,138 +0,0 @@
-name: inductor-A100-perf-nightly
-
-on:
- schedule:
- - cron: 0 7 * * 1-6
- - cron: 0 7 * * 0
- # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
- # out, let's try to run torchao cudagraphs_low_precision as part of cudagraphs
- workflow_dispatch:
- inputs:
- training:
- description: Run training (on by default)?
- required: false
- type: boolean
- default: true
- inference:
- description: Run inference (off by default)?
- required: false
- type: boolean
- default: false
- default:
- description: Run inductor_default?
- required: false
- type: boolean
- default: false
- dynamic:
- description: Run inductor_dynamic_shapes?
- required: false
- type: boolean
- default: false
- cudagraphs:
- description: Run inductor_cudagraphs?
- required: false
- type: boolean
- default: true
- freezing_cudagraphs:
- description: Run inductor_cudagraphs with freezing for inference?
- required: false
- type: boolean
- default: false
- freeze_autotune_cudagraphs:
- description: Run inductor_cudagraphs with freezing and max autotune for inference?
- required: false
- type: boolean
- default: false
- aotinductor:
- description: Run aot_inductor for inference?
- required: false
- type: boolean
- default: false
- maxautotune:
- description: Run inductor_max_autotune?
- required: false
- type: boolean
- default: false
- benchmark_configs:
- description: The list of configs used by the benchmark
- required: false
- type: string
- default: inductor_huggingface_perf,inductor_timm_perf,inductor_torchbench_perf
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- linux-focal-cuda12_1-py3_10-gcc9-inductor-build:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.0'
- test-matrix: |
- { include: [
- { config: "inductor_huggingface_perf", shard: 1, num_shards: 3, runner: "linux.gcp.a100.large" },
- { config: "inductor_huggingface_perf", shard: 2, num_shards: 3, runner: "linux.gcp.a100.large" },
- { config: "inductor_huggingface_perf", shard: 3, num_shards: 3, runner: "linux.gcp.a100.large" },
- { config: "inductor_timm_perf", shard: 1, num_shards: 5, runner: "linux.gcp.a100.large" },
- { config: "inductor_timm_perf", shard: 2, num_shards: 5, runner: "linux.gcp.a100.large" },
- { config: "inductor_timm_perf", shard: 3, num_shards: 5, runner: "linux.gcp.a100.large" },
- { config: "inductor_timm_perf", shard: 4, num_shards: 5, runner: "linux.gcp.a100.large" },
- { config: "inductor_timm_perf", shard: 5, num_shards: 5, runner: "linux.gcp.a100.large" },
- { config: "inductor_torchbench_perf", shard: 1, num_shards: 4, runner: "linux.gcp.a100.large" },
- { config: "inductor_torchbench_perf", shard: 2, num_shards: 4, runner: "linux.gcp.a100.large" },
- { config: "inductor_torchbench_perf", shard: 3, num_shards: 4, runner: "linux.gcp.a100.large" },
- { config: "inductor_torchbench_perf", shard: 4, num_shards: 4, runner: "linux.gcp.a100.large" },
- ]}
- selected-test-configs: ${{ inputs.benchmark_configs }}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-test-nightly:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
- if: github.event.schedule == '0 7 * * 1-6'
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- timeout-minutes: 720
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-test-weekly:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
- if: github.event.schedule == '0 7 * * 0'
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-aotinductor-true-freezing_cudagraphs-true-maxautotune-true-freeze_autotune_cudagraphs-true-cudagraphs_low_precision-true
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- timeout-minutes: 1440
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-test:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
- if: github.event_name == 'workflow_dispatch'
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-false-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- timeout-minutes: 720
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
diff --git a/.github/workflows/inductor-periodic.yml b/.github/workflows/inductor-periodic.yml
deleted file mode 100644
index 6f8c06ed030b0..0000000000000
--- a/.github/workflows/inductor-periodic.yml
+++ /dev/null
@@ -1,58 +0,0 @@
-name: inductor-periodic
-
-on:
- push:
- tags:
- - ciflow/inductor/*
- workflow_dispatch:
- schedule:
- # Run every 4 hours during the week and every 12 hours on the weekend
- - cron: 45 0,4,8,12,16,20 * * 1-5
- - cron: 45 4,12 * * 0,6
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-
-permissions: read-all
-
-jobs:
- linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build:
- name: cuda12.1-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.6'
- test-matrix: |
- { include: [
- { config: "dynamo_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamo_eager_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamo_eager_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamo_eager_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamo_eager_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_eager_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_eager_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_eager_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_eager_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_aot_eager_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_aot_eager_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_aot_eager_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_aot_eager_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_aot_eager_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- ]}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-test:
- name: cuda12.1-py3.10-gcc9-sm86-periodic-dynamo-benchmarks
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
diff --git a/.github/workflows/inductor.yml b/.github/workflows/inductor.yml
deleted file mode 100644
index 0ad799a80bcc0..0000000000000
--- a/.github/workflows/inductor.yml
+++ /dev/null
@@ -1,142 +0,0 @@
-name: inductor
-
-on:
- push:
- branches:
- - main
- - release/*
- tags:
- - ciflow/inductor/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- linux-focal-rocm6_1-py3_8-inductor-build:
- name: rocm6.1-py3.8-inductor
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image-name: pytorch-linux-focal-rocm-n-py3
- test-matrix: |
- { include: [
- { config: "inductor", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.2" },
- ]}
-
- linux-focal-rocm6_1-py3_8-inductor-test:
- permissions:
- id-token: write
- contents: read
- name: rocm6.1-py3.8-inductor
- uses: ./.github/workflows/_rocm-test.yml
- needs: linux-focal-rocm6_1-py3_8-inductor-build
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.test-matrix }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-build:
- name: cuda12.1-py3.10-gcc9-sm86
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.6'
- test-matrix: |
- { include: [
- { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
- { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "inductor_cpp_wrapper_abi_compatible", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
- ]}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-test:
- name: cuda12.1-py3.10-gcc9-sm86
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.0'
- test-matrix: |
- { include: [
- { config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
- ]}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-test-gcp:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-jammy-cpu-py3_8-gcc11-inductor-build:
- name: linux-jammy-cpu-py3.8-gcc11-inductor
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11-build
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks
- test-matrix: |
- { include: [
- { config: "cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
- { config: "cpu_inductor_timm", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
- { config: "cpu_inductor_timm", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
- { config: "cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
- { config: "cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
- { config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
- { config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
- { config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
- { config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
- { config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
- { config: "inductor_torchbench_cpu_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
- ]}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-jammy-cpu-py3_8-gcc11-inductor-test:
- name: linux-jammy-cpu-py3.8-gcc11-inductor
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-jammy-cpu-py3_8-gcc11-inductor-build
- with:
- build-environment: linux-jammy-py3.8-gcc11-build
- docker-image: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-cpu-py3_8-gcc11-inductor-build.outputs.test-matrix }}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
diff --git a/.github/workflows/lint-bc.yml b/.github/workflows/lint-bc.yml
deleted file mode 100644
index 73d7805082026..0000000000000
--- a/.github/workflows/lint-bc.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: BC Lint
-
-on:
- pull_request:
- types:
- - opened
- - synchronize
- - reopened
- branches-ignore:
- - nightly
- workflow_dispatch:
-
-jobs:
- bc_linter:
- runs-on: ubuntu-latest
- steps:
- - name: Run BC Lint Action
- uses: pytorch/test-infra/.github/actions/bc-lint@main
- with:
- repo: ${{ github.event.pull_request.head.repo.full_name }}
- base_sha: ${{ github.event.pull_request.base.sha }}
- head_sha: ${{ github.event.pull_request.head.sha }}
- suppression: ${{ contains(github.event.pull_request.labels.*.name, 'suppress-api-compatibility-check') || contains(github.event.pull_request.labels.*.name, 'suppress-bc-linter') }}
- docs_link: 'https://github.com/pytorch/test-infra/wiki/BC-Linter'
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
deleted file mode 100644
index f1b6611d00e03..0000000000000
--- a/.github/workflows/lint.yml
+++ /dev/null
@@ -1,266 +0,0 @@
-name: Lint
-
-on:
- pull_request:
- branches-ignore:
- - nightly
- push:
- branches:
- - main
- - release/*
- - landchecks/*
- workflow_dispatch:
-
-permissions: read-all
-# The names of steps that actually test the code should be suffixed with `(nonretryable)`.
- # When any other step fails, its job will be retried once by retryBot.
-jobs:
- lintrunner-clang:
- uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
- with:
- timeout: 120
- runner: linux.2xlarge
- docker-image: pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter
- # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
- # to run git rev-parse HEAD~:.ci/docker when a new image is needed
- fetch-depth: 0
- submodules: true
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- script: |
- export ADDITIONAL_LINTRUNNER_ARGS="--take CLANGTIDY,CLANGFORMAT"
- export CLANG=1
- .github/scripts/lintrunner.sh
-
- lintrunner-noclang:
- uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
- with:
- timeout: 120
- runner: linux.2xlarge
- docker-image: pytorch-linux-jammy-cuda11.8-cudnn8-py3.9-linter
- # NB: A shallow checkout won't work here because calculate-docker-image requires a full checkout
- # to run git rev-parse HEAD~:.ci/docker when a new image is needed
- fetch-depth: 0
- submodules: true
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- script: |
- export ADDITIONAL_LINTRUNNER_ARGS="--skip CLANGTIDY,CLANGFORMAT"
- .github/scripts/lintrunner.sh
-
- quick-checks:
- uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
- with:
- runner: linux.2xlarge
- docker-image: pytorch-linux-focal-linter
- fetch-depth: 0
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- script: |
- # The generic Linux job chooses to use base env, not the one setup by the image
- CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
- conda activate "${CONDA_ENV}"
-
- # Ensure no non-breaking spaces
- # NB: We use 'printf' below rather than '\u000a' since bash pre-4.2
- # does not support the '\u000a' syntax (which is relevant for local linters)
- (! git --no-pager grep -In "$(printf '\xC2\xA0')" -- . || (echo "The above lines have non-breaking spaces (U+00A0); please convert them to spaces (U+0020)"; false))
-
- # Ensure cross-OS compatible file names
- (! git ls-files | grep -E '([<>:"|?*]|[ .]$)' || (echo "The above file names are not valid across all operating systems. Please ensure they don't contain the characters '<>:""|?*' and don't end with whitespace or a '.' "; false))
-
- # Ensure no versionless Python shebangs
- (! git --no-pager grep -In '#!.*python$' -- . || (echo "The above lines have versionless Python shebangs; please specify either python2 or python3"; false))
-
- # Ensure ciflow tags mentioned in config
- python3 .github/scripts/collect_ciflow_labels.py --validate-tags
-
- # C++ docs check
- pushd docs/cpp/source
- ./check-doxygen.sh
- popd
-
- # CUDA kernel launch check
- set -eux
- python3 torch/testing/_internal/check_kernel_launches.py |& tee cuda_kernel_launch_checks.txt
-
- pr-sanity-checks:
- name: pr-sanity-checks
- runs-on: [self-hosted, linux.large]
- # Only run this on pull requests. This check is simple enough to be done without a Docker image
- if: github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'skip-pr-sanity-checks')
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
- fetch-depth: -1
-
- - name: PR size check (nonretryable)
- env:
- BASE: ${{ github.event.pull_request.base.sha }}
- HEAD: ${{ github.event.pull_request.head.sha }}
- run: |
- bash .github/scripts/pr-sanity-check.sh
-
- workflow-checks:
- uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
- with:
- runner: linux.2xlarge
- docker-image: pytorch-linux-focal-linter
- fetch-depth: -1
- submodules: true
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- script: |
- # The generic Linux job chooses to use base env, not the one setup by the image
- CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
- conda activate "${CONDA_ENV}"
-
- # Regenerate workflows
- .github/scripts/generate_ci_workflows.py
-
- RC=0
- # Assert that regenerating the workflows didn't change them
- if ! .github/scripts/report_git_status.sh .github/workflows; then
- echo
- echo 'As shown by the above diff, the committed .github/workflows'
- echo 'are not up to date according to .github/templates.'
- echo 'Please run this command, commit, and push again to your PR:'
- echo
- echo ' .github/scripts/generate_ci_workflows.py'
- echo
- echo 'If running that command does nothing, you may need to rebase'
- echo 'onto a more recent commit from the PyTorch main branch.'
- RC=1
- fi
-
- # Check that jobs will be cancelled
- .github/scripts/ensure_actions_will_cancel.py
-
- exit $RC
-
- toc:
- uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
- with:
- runner: linux.2xlarge
- docker-image: pytorch-linux-focal-linter
- fetch-depth: 0
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- script: |
- # The generic Linux job chooses to use base env, not the one setup by the image
- CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
- conda activate "${CONDA_ENV}"
-
- # Regenerate ToCs and check that they didn't change
- set -eu
-
- export PATH=~/.npm-global/bin:"$PATH"
- for FILE in $(git grep -Il '' -- '**.md'); do
- markdown-toc --bullets='-' -i "$FILE"
- done
-
- if ! .github/scripts/report_git_status.sh .; then
- echo
- echo 'As shown by the above diff, the table of contents in one or'
- echo 'more Markdown files is not up to date with the file contents.'
- echo 'You can either apply that Git diff directly to correct the'
- echo 'table of contents, or if you have npm installed, you can'
- echo 'install the npm package markdown-toc and run the following'
- # shellcheck disable=SC2016
- echo 'command (replacing $FILE with the filename for which you want'
- echo 'to regenerate the table of contents):'
- echo
- # shellcheck disable=SC2016
- echo " markdown-toc --bullets='-' -i \"\$FILE\""
- false
- fi
-
- test-tools:
- name: Test tools
- if: ${{ github.repository == 'pytorch/pytorch' }}
- uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
- with:
- runner: linux.2xlarge
- docker-image: pytorch-linux-focal-linter
- fetch-depth: 0
- ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
- script: |
- # The generic Linux job chooses to use base env, not the one setup by the image
- CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
- conda activate "${CONDA_ENV}"
-
- # Test tools
- python3 -m unittest discover -vs tools/test -p 'test_*.py'
- python3 -m unittest discover -vs .github/scripts -p 'test_*.py'
-
- test_run_test:
- name: Test `run_test.py` is usable without boto3/rockset
- if: ${{ github.repository == 'pytorch/pytorch' }}
- runs-on: linux.20_04.4x
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
- fetch-depth: 1
- - name: Setup Python 3.8
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- architecture: x64
- cache: pip
- - name: Install dependencies
- run: |
- pip install pytest-rerunfailures==11.1.* pytest-flakefinder==1.1.* pytest-xdist==3.3.* expecttest==0.1.* numpy==1.24.*
- pip install torch --pre --index-url https://download.pytorch.org/whl/nightly/cpu/
- - name: Run run_test.py (nonretryable)
- run: |
- # Run test_vulkan, which is a fast noop on Linux
- python3 test/run_test.py --include test_vulkan --verbose
-
- test_collect_env:
- if: ${{ github.repository == 'pytorch/pytorch' }}
- name: Test collect_env
- runs-on: linux.20_04.4x
- strategy:
- matrix:
- test_type: [with_torch, without_torch, older_python_version]
- steps:
- # [see note: pytorch repo ref]
- # deep clone (fetch-depth 0) required, to allow us to use git log
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
- fetch-depth: 1
- - name: Setup Python 3.6
- if: matrix.test_type == 'older_python_version'
- uses: actions/setup-python@v4
- with:
- python-version: '3.6'
- architecture: x64
- check-latest: false
- cache: pip
- cache-dependency-path: |
- **/requirements.txt
- - name: Setup Python 3.8
- if: matrix.test_type != 'older_python_version'
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- architecture: x64
- check-latest: false
- cache: pip
- cache-dependency-path: |
- **/requirements.txt
- - name: Install torch
- if: matrix.test_type == 'with_torch'
- run: |
- pip install -r requirements.txt
- # Doesn't really matter what torch version, we just need ANY torch installed
- pip install 'torch==1.*'
- - name: Run collect_env.py (nonretryable)
- run: |
- # All we need to see is that it passes
- python3 torch/utils/collect_env.py
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
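
Note: the concurrency block above builds one group key per pull request (falling back to the commit SHA for pushes), so with cancel-in-progress a newer run cancels the superseded one. The Python sketch below is purely illustrative, not code from the repo; it just re-creates how the GitHub expression composes that key.

    from typing import Optional

    def concurrency_group(workflow: str, pr_number: Optional[int], sha: str,
                          is_dispatch: bool) -> str:
        # Illustrative re-creation of
        # ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
        # `or` mirrors the `||` fallback: PR number when present, commit SHA otherwise.
        return f"{workflow}-{pr_number or sha}-{str(is_dispatch).lower()}"

    # Two pushes to the same PR land in the same group, so the older run is cancelled.
    assert concurrency_group("Lint", 12345, "deadbeef", False) == \
           concurrency_group("Lint", 12345, "cafebabe", False)
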
diff --git a/.github/workflows/linux-aarch64.yml b/.github/workflows/linux-aarch64.yml
deleted file mode 100644
index acdb6884971b6..0000000000000
--- a/.github/workflows/linux-aarch64.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: linux-aarch64
-
-on:
- push:
- tags:
- - ciflow/linux-aarch64/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-jobs:
- linux-jammy-aarch64-py3_10-build:
- name: linux-jammy-aarch64-py3.10
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-jammy-aarch64-py3.10
- docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11
- runner: linux.arm64.2xlarge
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 4, runner: "linux.arm64.2xlarge" },
- { config: "default", shard: 2, num_shards: 4, runner: "linux.arm64.2xlarge" },
- { config: "default", shard: 3, num_shards: 4, runner: "linux.arm64.2xlarge" },
- { config: "default", shard: 4, num_shards: 4, runner: "linux.arm64.2xlarge" },
- ]}
-
- linux-jammy-aarch64-py3_10-test:
- name: linux-jammy-aarch64-py3.10
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-jammy-aarch64-py3_10-build
- permissions:
- id-token: write
- contents: read
- with:
- build-environment: linux-jammy-aarch64-py3.10
- docker-image: ${{ needs.linux-jammy-aarch64-py3_10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-aarch64-py3_10-build.outputs.test-matrix }}
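
Note: the test-matrix above splits the "default" config into four shards on linux.arm64.2xlarge runners. As a hedged illustration of what a shard/num_shards pair means (this is not the actual partitioning logic in test/run_test.py, which also balances shards by historical test duration), a deterministic round-robin split could look like:

    from typing import List

    def select_shard(tests: List[str], shard: int, num_shards: int) -> List[str]:
        # Round-robin assignment of tests to 1-indexed shards (illustrative only).
        return [t for i, t in enumerate(sorted(tests)) if i % num_shards == shard - 1]

    tests = ["test_nn", "test_ops", "test_torch", "test_autograd"]
    # Shards 1..4 together cover every test exactly once.
    assert sorted(sum((select_shard(tests, s, 4) for s in (1, 2, 3, 4)), [])) == sorted(tests)
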
diff --git a/.github/workflows/llm_td_retrieval.yml b/.github/workflows/llm_td_retrieval.yml
deleted file mode 100644
index 047e8ace0049d..0000000000000
--- a/.github/workflows/llm_td_retrieval.yml
+++ /dev/null
@@ -1,120 +0,0 @@
-name: Retrieval PyTorch Tests for Target Determination
-
-on:
- workflow_call:
-
-permissions:
- id-token: write
- contents: read
-
-jobs:
- llm-retrieval:
- runs-on: linux.4xlarge
- continue-on-error: true
- steps:
- - name: Clone PyTorch
- uses: actions/checkout@v3
- with:
- repository: pytorch/pytorch
- fetch-depth: 0
- path: pytorch
-
- - name: Setup Linux
- uses: ./pytorch/.github/actions/setup-linux
-
- - name: Clone CodeLlama
- uses: actions/checkout@v3
- with:
- repository: osalpekar/codellama
- ref: main
- path: codellama
-
- - name: Clone Target Determination Code
- uses: actions/checkout@v3
- with:
- repository: osalpekar/llm-target-determinator
- ref: v0.0.2
- path: llm-target-determinator
-
- - name: Setup Conda
- uses: conda-incubator/setup-miniconda@v2.1.1
- with:
- miniconda-version: "py39_4.12.0"
- python-version: 3.9
-
- - name: Install Requirements
- shell: bash -l {0}
- run: |
- set -euxo pipefail
- conda create \
- --yes \
- --quiet \
- --name "tdenv" \
- "python=3.9"
- conda activate tdenv
- cd "${GITHUB_WORKSPACE}/llm-target-determinator"
- pip install -r requirements.txt
- cd ../codellama
- pip install -e .
-
- - name: Fetch CodeLlama Checkpoint
- shell: bash -l {0}
- run: |
- set -euxo pipefail
- conda activate tdenv
- cd codellama/
- mkdir "CodeLlama-7b-Python"
- aws s3 cp "s3://target-determinator-assets/CodeLlama-7b-Python" "CodeLlama-7b-Python" --recursive --no-progress
-
- - name: Fetch indexes
- uses: nick-fields/retry@v2.8.2
- with:
- max_attempts: 3
- retry_wait_seconds: 10
- timeout_minutes: 5
- shell: bash
- command: |
- set -euxo pipefail
- python3 -m pip install awscli==1.29.40
- cd "${GITHUB_WORKSPACE}"/llm-target-determinator/assets
- aws s3 cp "s3://target-determinator-assets/indexes/latest" . --recursive
-
- unzip -o indexer-files\*.zip
- rm indexer-files*.zip
-
- - name: Run Retriever
- id: run_retriever
- continue-on-error: true # ghstack not currently supported due to problems getting git diff
- shell: bash -l {0}
- run: |
- set -euxo pipefail
- conda activate tdenv
- cd "${GITHUB_WORKSPACE}"/llm-target-determinator
- torchrun \
- --standalone \
- --nnodes=1 \
- --nproc-per-node=1 \
- retriever.py \
- --experiment-name indexer-files \
- --pr-parse-format GITDIFF
- cd assets
- zip -r mappings.zip mappings
-
- - name: Upload results to s3
- uses: seemethere/upload-artifact-s3@v5
- if: ${{ steps.run_retriever.outcome == 'success' }}
- with:
- name: llm_results
- retention-days: 14
- if-no-files-found: warn
- path: llm-target-determinator/assets/mappings.zip
- env:
- AWS_ACCESS_KEY_ID: ""
- AWS_SECRET_ACCESS_KEY: ""
- AWS_SESSION_TOKEN: ""
- AWS_DEFAULT_REGION: ""
- AWS_REGION: ""
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
diff --git a/.github/workflows/mac-mps.yml b/.github/workflows/mac-mps.yml
deleted file mode 100644
index da98d01550a47..0000000000000
--- a/.github/workflows/mac-mps.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-name: Mac MPS
-
-on:
- push:
- tags:
- - ciflow/mps/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- macos-13-py3-arm64-build:
- name: macos-13-py3-arm64
- uses: ./.github/workflows/_mac-build.yml
- with:
- sync-tag: macos-py3-arm64-build
- build-environment: macos-13-py3-arm64
- runner-type: macos-m1-stable
- build-generates-artifacts: true
- # To match the one pre-installed in the m1 runners
- python-version: 3.9.12
- test-matrix: |
- { include: [
- { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-stable" },
- { config: "mps", shard: 1, num_shards: 1, runner: "macos-m2-14" },
- ]}
-
- macos-py3-arm64-mps-test:
- name: macos-py3-arm64-mps
- uses: ./.github/workflows/_mac-test-mps.yml
- needs: macos-13-py3-arm64-build
- with:
- sync-tag: macos-py3-arm64-mps-test
- build-environment: macos-13-py3-arm64
- # Same as the build job
- python-version: 3.9.12
- test-matrix: ${{ needs.macos-13-py3-arm64-build.outputs.test-matrix }}
diff --git a/.github/workflows/nightly-rockset-uploads.yml b/.github/workflows/nightly-rockset-uploads.yml
deleted file mode 100644
index f553cfd068d14..0000000000000
--- a/.github/workflows/nightly-rockset-uploads.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-name: Nightly Upload to rockset
-
-on:
- schedule:
- # Choose a random time near midnight PST because it may be delayed if there are high loads
- - cron: 37 7 * * *
- pull_request:
- paths:
- - 'tools/stats/upload_external_contrib_stats.py'
- - 'tools/stats/upload_test_stat_aggregates.py'
- - '.github/workflows/nightly-rockset-uploads.yml'
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
-
- upload-stats-to-rockset:
- runs-on: ubuntu-22.04
- environment: upload-stats
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
-
- - uses: actions/setup-python@v4
- with:
- python-version: '3.11'
- cache: pip
-
- - run: |
- pip3 install requests==2.26 rockset==1.0.3 boto3==1.19.12
-
- - name: Upload external contribution stats
- uses: nick-fields/retry@v2.8.2
- env:
- ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- if: ${{ env.ROCKSET_API_KEY != '' }}
- with:
- timeout_minutes: 10
- max_attempts: 10
- retry_wait_seconds: 90
- command: |
- echo "Uploading external contribution stats for 10 days starting on" "$(date -d '10 days ago' '+%Y-%m-%d')"
- python3 -m tools.stats.upload_external_contrib_stats --startDate "$(date -d '10 days ago' '+%Y-%m-%d')" --length 10
- echo "Uploading testing aggregate data" "$(date -d yesterday '+%Y-%m-%d')"
- python3 -m tools.stats.upload_test_stat_aggregates --date "$(date -d yesterday '+%Y-%m-%d')"
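
Note: the upload step passes `--startDate "$(date -d '10 days ago' ...)"` together with `--length 10`, so each nightly run re-uploads a sliding 10-day window of contribution stats. A small, assumption-laden Python equivalent of that window computation (the helper name is made up for illustration):

    from datetime import date, timedelta

    def upload_window(today: date, length_days: int = 10):
        # start = today minus length_days, matching the GNU date invocation above.
        start = today - timedelta(days=length_days)
        return start.isoformat(), length_days

    start, length = upload_window(date(2024, 6, 15))
    assert (start, length) == ("2024-06-05", 10)
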
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
deleted file mode 100644
index 25f71c70e9486..0000000000000
--- a/.github/workflows/nightly.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-name: nightly
-
-on:
- schedule:
- - cron: 0 0 * * *
- push:
- tags:
- # NOTE: Doc build pipelines should only get triggered on release candidate builds
- # Release candidate tags look like: v1.11.0-rc1
- - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
- - ciflow/nightly/*
- workflow_dispatch:
-
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-jobs:
- docs-build:
- name: docs build
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
-
- docs-push:
- name: docs push
- uses: ./.github/workflows/_docs.yml
- needs: docs-build
- with:
- build-environment: linux-jammy-py3.8-gcc11
- docker-image: ${{ needs.docs-build.outputs.docker-image }}
- push: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || startsWith(github.event.ref, 'refs/tags/v') }}
- run-doxygen: true
- secrets:
- GH_PYTORCHBOT_TOKEN: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
-
- update-vision-commit-hash:
- runs-on: ubuntu-latest
- environment: update-commit-hash
- steps:
- - name: update-vision-commit-hash
- uses: pytorch/test-infra/.github/actions/update-commit-hash@main
- if: ${{ github.event_name == 'schedule' }}
- with:
- repo-name: vision
- branch: main
- pin-folder: .github/ci_commit_pins
- test-infra-ref: main
- updatebot-token: ${{ secrets.UPDATEBOT_TOKEN }}
- pytorchbot-token: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
-
- update-audio-commit-hash:
- runs-on: ubuntu-latest
- environment: update-commit-hash
- steps:
- - name: update-audio-commit-hash
- uses: pytorch/test-infra/.github/actions/update-commit-hash@main
- if: ${{ github.event_name == 'schedule' }}
- with:
- repo-name: audio
- branch: main
- pin-folder: .github/ci_commit_pins
- test-infra-ref: main
- updatebot-token: ${{ secrets.UPDATEBOT_TOKEN }}
- pytorchbot-token: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
-
- update-executorch-commit-hash:
- runs-on: ubuntu-latest
- environment: update-commit-hash
- steps:
- - name: update-executorch-commit-hash
- uses: pytorch/test-infra/.github/actions/update-commit-hash@main
- if: ${{ github.event_name == 'schedule' }}
- with:
- repo-name: executorch
- branch: main
- pin-folder: .ci/docker/ci_commit_pins
- test-infra-ref: main
- updatebot-token: ${{ secrets.UPDATEBOT_TOKEN }}
- pytorchbot-token: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
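
Note: the docs-push job above only pushes when the run came from the nightly schedule, a manual dispatch, or a release-candidate tag (refs/tags/v*). A minimal sketch of that predicate, with a hypothetical function name standing in for the inline expression:

    def should_push_docs(event_name: str, ref: str) -> bool:
        # Mirrors: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
        #          || startsWith(github.event.ref, 'refs/tags/v')
        return event_name in ("schedule", "workflow_dispatch") or ref.startswith("refs/tags/v")

    assert should_push_docs("schedule", "refs/heads/main")
    assert should_push_docs("push", "refs/tags/v1.11.0-rc1")
    assert not should_push_docs("push", "refs/heads/main")
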
diff --git a/.github/workflows/periodic.yml b/.github/workflows/periodic.yml
deleted file mode 100644
index 716a72cc6d235..0000000000000
--- a/.github/workflows/periodic.yml
+++ /dev/null
@@ -1,244 +0,0 @@
-name: periodic
-
-on:
- schedule:
- # We have several schedules so jobs can check github.event.schedule to activate only for a fraction of the runs.
- # Also run less frequently on weekends.
- - cron: 45 0,8,16 * * 1-5
- - cron: 45 4 * * 0,6
- - cron: 45 4,12,20 * * 1-5
- - cron: 45 12 * * 0,6
- - cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests
- push:
- tags:
- - ciflow/periodic/*
- branches:
- - release/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- llm-td:
- name: before-test
- uses: ./.github/workflows/llm_td_retrieval.yml
- permissions:
- id-token: write
- contents: read
-
- target-determination:
- name: before-test
- uses: ./.github/workflows/target_determination.yml
- needs: llm-td
- permissions:
- id-token: write
- contents: read
-
- parallelnative-linux-jammy-py3_8-gcc11-build:
- name: parallelnative-linux-jammy-py3.8-gcc11
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: parallelnative-linux-jammy-py3.8-gcc11
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- ]}
-
- parallelnative-linux-jammy-py3_8-gcc11-test:
- name: parallelnative-linux-jammy-py3.8-gcc11
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - parallelnative-linux-jammy-py3_8-gcc11-build
- - target-determination
- with:
- build-environment: parallelnative-linux-jammy-py3.8-gcc11
- docker-image: ${{ needs.parallelnative-linux-jammy-py3_8-gcc11-build.outputs.docker-image }}
- test-matrix: ${{ needs.parallelnative-linux-jammy-py3_8-gcc11-build.outputs.test-matrix }}
-
- linux-focal-cuda11_8-py3_9-gcc9-build:
- name: linux-focal-cuda11.8-py3.9-gcc9
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda11.8-py3.9-gcc9
- docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
- cuda-arch-list: 8.6
- test-matrix: |
- { include: [
- { config: "multigpu", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
- ]}
- build-with-debug: false
-
- linux-focal-cuda11_8-py3_9-gcc9-test:
- name: linux-focal-cuda11.8-py3.9-gcc9
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda11_8-py3_9-gcc9-build
- with:
- build-environment: linux-focal-cuda11.8-py3.9-gcc9
- docker-image: ${{ needs.linux-focal-cuda11_8-py3_9-gcc9-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda11_8-py3_9-gcc9-build.outputs.test-matrix }}
-
- linux-focal-cuda11_8-py3_10-gcc9-debug-build:
- name: linux-focal-cuda11.8-py3.10-gcc9-debug
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda11.8-py3.10-gcc9-debug
- docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
- build-with-debug: true
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-cuda11_8-py3_10-gcc9-debug-test:
- name: linux-focal-cuda11.8-py3.10-gcc9-debug
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-cuda11_8-py3_10-gcc9-debug-build
- - target-determination
- with:
- build-environment: linux-focal-cuda11.8-py3.10-gcc9-debug
- docker-image: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-debug-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-debug-build.outputs.test-matrix }}
-
- win-vs2019-cuda11_8-py3-build:
- name: win-vs2019-cuda11.8-py3
- uses: ./.github/workflows/_win-build.yml
- with:
- build-environment: win-vs2019-cuda11.8-py3
- cuda-version: "11.8"
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 4, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 4, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 4, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 4, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge.nonephemeral" },
- ]}
-
- win-vs2019-cuda11_8-py3-test:
- name: win-vs2019-cuda11.8-py3
- uses: ./.github/workflows/_win-test.yml
- needs:
- - win-vs2019-cuda11_8-py3-build
- - target-determination
- with:
- build-environment: win-vs2019-cuda11.8-py3
- cuda-version: "11.8"
- test-matrix: ${{ needs.win-vs2019-cuda11_8-py3-build.outputs.test-matrix }}
-
- # TODO: Figure out how to migrate this job to M1 runner
- ios-build-test:
- name: ios-build-test
- if: github.event_name != 'schedule' || github.event.schedule == '45 0,8,16 * * 1-5' || github.event.schedule == '45 4 * * 0,6' || github.event.schedule == '29 8 * * *'
- uses: ./.github/workflows/_ios-build-test.yml
- with:
- trigger-event: ${{ github.event_name }}
- build-environment: ios-build-test
- sync-tag: ios-build-test
- test-matrix: |
- { include: [
- { config: "default",
- shard: 1,
- num_shards: 1,
- runner: "macos-13-xlarge",
- ios_platform: "SIMULATOR",
- ios_arch: "arm64",
- use_lite_interpreter: 1,
- use_metal: 0,
- use_coreml: 1,
- use_custom_op_list: ""
- },
- { config: "default",
- shard: 1,
- num_shards: 1,
- runner: "macos-13-xlarge",
- ios_platform: "OS",
- ios_arch: "arm64",
- use_lite_interpreter: 1,
- use_metal: 1,
- use_coreml: 1,
- use_custom_op_list: "mobilenetv2.yaml"
- }
- ]}
-
- buck-build-test:
- name: buck-build-test
- uses: ./.github/workflows/_buck-build-test.yml
- with:
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "ubuntu-latest" },
- ]}
-
- android-emulator-build-test:
- name: android-emulator-build-test
- uses: ./.github/workflows/_run_android_tests.yml
- with:
- test-matrix: |
- { include: [
- { config: 'default',
- shard: 1,
- num_shards: 1,
- runner: 'ubuntu-20.04-16x',
- use_lite_interpreter: 1,
- # Just set x86 for testing here
- support_abi: 'x86',
- },
- ]}
-
- linux-vulkan-focal-py3_11-clang10-build:
- name: linux-vulkan-focal-py3.11-clang10
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-vulkan-focal-py3.11-clang10
- docker-image-name: pytorch-linux-focal-py3.11-clang10
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- ]}
-
- linux-vulkan-focal-py3_11-clang10-test:
- name: linux-vulkan-focal-py3.11-clang10
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-vulkan-focal-py3_11-clang10-build
- with:
- build-environment: linux-vulkan-focal-py3.11-clang10
- docker-image: ${{ needs.linux-vulkan-focal-py3_11-clang10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-vulkan-focal-py3_11-clang10-build.outputs.test-matrix }}
-
- linux-focal-rocm6_1-py3_8-build:
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image-name: pytorch-linux-focal-rocm-n-py3
- test-matrix: |
- { include: [
- { config: "distributed", shard: 1, num_shards: 2, runner: "linux.rocm.gpu" },
- { config: "distributed", shard: 2, num_shards: 2, runner: "linux.rocm.gpu" },
- ]}
-
- linux-focal-rocm6_1-py3_8-test:
- permissions:
- id-token: write
- contents: read
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_rocm-test.yml
- needs:
- - linux-focal-rocm6_1-py3_8-build
- - target-determination
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
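
Note: as the comment at the top of periodic.yml says, the workflow registers several cron schedules and individual jobs inspect github.event.schedule to run on only a subset of them; ios-build-test above is the example in this file. A sketch of that gating, with the cron strings copied verbatim from the workflow (the function itself is illustrative):

    def should_run_ios(event_name: str, schedule: str) -> bool:
        # Mirrors the `if:` condition on ios-build-test: run for any non-schedule
        # trigger, and for schedule triggers only on these three cron lines.
        allowed = {"45 0,8,16 * * 1-5", "45 4 * * 0,6", "29 8 * * *"}
        return event_name != "schedule" or schedule in allowed

    assert should_run_ios("workflow_dispatch", "")
    assert should_run_ios("schedule", "45 4 * * 0,6")
    assert not should_run_ios("schedule", "45 4,12,20 * * 1-5")
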
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
deleted file mode 100644
index 0ca9e0d33c8f9..0000000000000
--- a/.github/workflows/pull.yml
+++ /dev/null
@@ -1,478 +0,0 @@
-name: pull
-
-on:
- pull_request:
- branches-ignore:
- - nightly
- push:
- branches:
- - main
- - release/*
- - landchecks/*
- workflow_dispatch:
- schedule:
- - cron: 29 8 * * * # about 1:29am PDT
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- llm-td:
- name: before-test
- uses: ./.github/workflows/llm_td_retrieval.yml
- permissions:
- id-token: write
- contents: read
-
- target-determination:
- name: before-test
- uses: ./.github/workflows/target_determination.yml
- needs: llm-td
- permissions:
- id-token: write
- contents: read
-
- linux-jammy-py3_8-gcc11-build:
- name: linux-jammy-py3.8-gcc11
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "docs_test", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "backwards_compat", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "distributed", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "distributed", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
- ]}
-
- linux-jammy-py3_8-gcc11-test:
- name: linux-jammy-py3.8-gcc11
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-jammy-py3_8-gcc11-build
- - target-determination
- with:
- build-environment: linux-jammy-py3.8-gcc11
- docker-image: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.test-matrix }}
-
- linux-docs:
- name: linux-docs
- uses: ./.github/workflows/_docs.yml
- needs: linux-jammy-py3_8-gcc11-build
- with:
- build-environment: linux-jammy-py3.8-gcc11
- docker-image: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.docker-image }}
-
- linux-jammy-py3_8-gcc11-no-ops:
- name: linux-jammy-py3.8-gcc11-no-ops
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11-no-ops
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- linux-jammy-py3_8-gcc11-pch:
- name: linux-jammy-py3.8-gcc11-pch
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11-pch
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
-
- linux-jammy-py3_10-clang15-asan-build:
- name: linux-jammy-py3.10-clang15-asan
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3.10-clang15-asan
- docker-image-name: pytorch-linux-jammy-py3-clang15-asan
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 2, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 3, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 4, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 5, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 6, num_shards: 6, runner: "linux.4xlarge" },
- ]}
- sync-tag: asan-build
-
-
- linux-jammy-py3_10-clang15-asan-test:
- name: linux-jammy-py3.10-clang15-asan
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-jammy-py3_10-clang15-asan-build
- - target-determination
- with:
- build-environment: linux-jammy-py3.10-clang15-asan
- docker-image: ${{ needs.linux-jammy-py3_10-clang15-asan-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-py3_10-clang15-asan-build.outputs.test-matrix }}
- sync-tag: asan-test
-
- linux-focal-py3_8-clang10-onnx-build:
- name: linux-focal-py3.8-clang10-onnx
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-py3.8-clang10-onnx
- docker-image-name: pytorch-linux-focal-py3-clang10-onnx
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
- ]}
-
- linux-focal-py3_8-clang10-onnx-test:
- name: linux-focal-py3.8-clang10-onnx
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-py3_8-clang10-onnx-build
- - target-determination
- with:
- build-environment: linux-focal-py3.8-clang10-onnx
- docker-image: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.test-matrix }}
-
- linux-focal-py3_8-clang10-build:
- name: linux-focal-py3.8-clang10
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-py3.8-clang10
- docker-image-name: pytorch-linux-focal-py3.8-clang10
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- ]}
- linux-focal-py3_8-clang10-test:
- name: linux-focal-py3.8-clang10
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-py3_8-clang10-build
- - target-determination
- with:
- build-environment: linux-focal-py3.8-clang10
- docker-image: ${{ needs.linux-focal-py3_8-clang10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_8-clang10-build.outputs.test-matrix }}
-
- linux-focal-py3_11-clang10-build:
- name: linux-focal-py3.11-clang10
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-py3.11-clang10
- docker-image-name: pytorch-linux-focal-py3.11-clang10
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- ]}
-
-
- linux-focal-py3_11-clang10-test:
- name: linux-focal-py3.11-clang10
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-py3_11-clang10-build
- - target-determination
- with:
- build-environment: linux-focal-py3.11-clang10
- docker-image: ${{ needs.linux-focal-py3_11-clang10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_11-clang10-build.outputs.test-matrix }}
-
- linux-focal-py3_12-clang10-build:
- name: linux-focal-py3.12-clang10
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-py3.12-clang10
- docker-image-name: pytorch-linux-focal-py3.12-clang10
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- ]}
-
- linux-focal-py3_12-clang10-test:
- name: linux-focal-py3.12-clang10
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-py3_12-clang10-build
- with:
- build-environment: linux-focal-py3.12-clang10
- docker-image: ${{ needs.linux-focal-py3_12-clang10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_12-clang10-build.outputs.test-matrix }}
- timeout-minutes: 600
-
- linux-focal-cuda11_8-py3_10-gcc9-build:
- name: linux-focal-cuda11.8-py3.10-gcc9
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-cuda11.8-py3.10-gcc9
- docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
- test-matrix: |
- { include: [
- { config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
- { config: "distributed", shard: 2, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
- { config: "distributed", shard: 3, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-cuda11_8-py3_10-gcc9-test:
- name: linux-focal-cuda11.8-py3.10-gcc9
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-cuda11_8-py3_10-gcc9-build
- - target-determination
- with:
- timeout-minutes: 360
- build-environment: linux-focal-cuda11.8-py3.10-gcc9
- docker-image: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-build.outputs.test-matrix }}
-
- linux-focal-cuda12_1-py3_10-gcc9-build:
- name: linux-focal-cuda12.1-py3.10-gcc9
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-cuda12_1-py3_10-gcc9-test:
- name: linux-focal-cuda12.1-py3.10-gcc9
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-cuda12_1-py3_10-gcc9-build
- - target-determination
- with:
- timeout-minutes: 360
- build-environment: linux-focal-cuda12.1-py3.10-gcc9
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
-
- linux-jammy-py3-clang12-mobile-build:
- name: linux-jammy-py3-clang12-mobile-build
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3-clang12-mobile-build
- docker-image-name: pytorch-linux-jammy-py3-clang15-asan
- build-generates-artifacts: false
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- linux-jammy-cuda-11_8-cudnn8-py3_8-clang12-build:
- name: linux-jammy-cuda11.8-cudnn8-py3.8-clang12
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-cuda11.8-cudnn8-py3.8-clang12
- docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn8-py3.8-clang12
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- linux-focal-py3-clang9-mobile-custom-build-static:
- name: linux-focal-py3-clang9-mobile-custom-build-static
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-py3-clang9-mobile-custom-build-static
- docker-image-name: pytorch-linux-focal-py3-clang9-android-ndk-r21e
- build-generates-artifacts: false
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- linux-focal-py3_8-clang9-xla-build:
- name: linux-focal-py3_8-clang9-xla
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-py3.8-clang9-xla
- docker-image-name: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/xla_base:v1.1-lite
- test-matrix: |
- { include: [
- { config: "xla", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
- ]}
-
- linux-focal-py3_8-clang9-xla-test:
- name: linux-focal-py3_8-clang9-xla
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-py3_8-clang9-xla-build
- with:
- build-environment: linux-focal-py3.8-clang9-xla
- docker-image: ${{ needs.linux-focal-py3_8-clang9-xla-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_8-clang9-xla-build.outputs.test-matrix }}
-
- win-vs2019-cpu-py3-build:
- # don't run build twice on main
- if: github.event_name == 'pull_request'
- name: win-vs2019-cpu-py3
- uses: ./.github/workflows/_win-build.yml
- with:
- build-environment: win-vs2019-cpu-py3
- cuda-version: cpu
- sync-tag: win-cpu-build
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- { config: "default", shard: 2, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- { config: "default", shard: 3, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- ]}
-
- linux-focal-cpu-py3_10-gcc9-bazel-test:
- name: linux-focal-cpu-py3.10-gcc9-bazel-test
- uses: ./.github/workflows/_bazel-build-test.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- cuda-version: cpu
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.4xlarge" },
- ]}
-
- linux-focal-cuda12_1-py3_10-gcc9-bazel-test:
- name: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
- uses: ./.github/workflows/_bazel-build-test.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- cuda-version: "12.1"
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single:
- name: linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single
- uses: ./.github/workflows/_android-build-test.yml
- with:
- build-environment: linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single
- docker-image-name: pytorch-linux-focal-py3-clang9-android-ndk-r21e
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- ]}
-
- linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single-full-jit:
- name: linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single-full-jit
- uses: ./.github/workflows/_android-build-test.yml
- with:
- build-environment: linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single-full-jit
- docker-image-name: pytorch-linux-focal-py3-clang9-android-ndk-r21e
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- ]}
-
- linux-jammy-py3_8-gcc11-mobile-lightweight-dispatch-build:
- name: linux-jammy-py3.8-gcc11-mobile-lightweight-dispatch-build
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11-mobile-lightweight-dispatch-build
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- build-generates-artifacts: false
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- linux-focal-rocm6_1-py3_8-build:
- # don't run build twice on main
- if: github.event_name == 'pull_request'
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image-name: pytorch-linux-focal-rocm-n-py3
- sync-tag: rocm-build
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.rocm.gpu" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.rocm.gpu" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.rocm.gpu" },
- ]}
-
- linux-focal-cuda12_1-py3_10-gcc9-sm86-build:
- name: linux-focal-cuda12.1-py3.10-gcc9-sm86
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- cuda-arch-list: 8.6
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 5, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-cuda12_1-py3_10-gcc9-sm86-test:
- name: linux-focal-cuda12.1-py3.10-gcc9-sm86
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-cuda12_1-py3_10-gcc9-sm86-build
- - target-determination
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.test-matrix }}
-
- linux-jammy-py3-clang12-executorch-build:
- name: linux-jammy-py3-clang12-executorch
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3-clang12-executorch
- docker-image-name: pytorch-linux-jammy-py3-clang12-executorch
- test-matrix: |
- { include: [
- { config: "executorch", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- ]}
-
- linux-jammy-py3-clang12-executorch-test:
- name: linux-jammy-py3-clang12-executorch
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-jammy-py3-clang12-executorch-build
- with:
- build-environment: linux-jammy-py3-clang12-executorch
- docker-image: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.test-matrix }}
diff --git a/.github/workflows/revert.yml b/.github/workflows/revert.yml
deleted file mode 100644
index c67689b86149f..0000000000000
--- a/.github/workflows/revert.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-name: Revert merged PR
-
-on:
- repository_dispatch:
- types: [try-revert]
-
-jobs:
- do_revert:
- name: try_revert_pr_${{ github.event.client_payload.pr_num }}
- runs-on: linux.20_04.4x
- environment: mergebot
- env:
- GH_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- steps:
- - name: Checkout repo
- uses: actions/checkout@v2
- id: checkout
- with:
- fetch-depth: 0
- token: ${{ secrets.MERGEBOT_TOKEN }}
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- architecture: x64
- check-latest: false
- cache: pip
- - run: pip install pyyaml==6.0
-
- - name: Setup committer id
- run: |
- git config --global user.email "pytorchmergebot@users.noreply.github.com"
- git config --global user.name "PyTorch MergeBot"
- - name: Revert PR
- env:
- GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
- PR_NUM: ${{ github.event.client_payload.pr_num }}
- COMMENT_ID: ${{ github.event.client_payload.comment_id }}
- REASON: ${{ github.event.client_payload.reason }}
- run: |
- set -ex
- if [ -n "${COMMENT_ID}" ]; then
- if [ -n "${REASON}" ]; then
- python3 .github/scripts/trymerge.py --revert --comment-id "${COMMENT_ID}" --reason "${REASON}" "${PR_NUM}"
- else
- python3 .github/scripts/trymerge.py --revert --comment-id "${COMMENT_ID}" "${PR_NUM}"
- fi
- else
- if [ -n "${REASON}" ]; then
- python3 .github/scripts/trymerge.py --revert --reason "${REASON}" "${PR_NUM}"
- else
- python3 .github/scripts/trymerge.py --revert "${PR_NUM}"
- fi
- fi
- - name: Comment on Canceled
- if: ${{ cancelled() && steps.checkout.outcome == 'success' }}
- continue-on-error: true
- env:
- GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
- PR_NUM: ${{ github.event.client_payload.pr_num }}
- run: |
- set -ex
- python3 .github/scripts/comment_on_pr.py "${PR_NUM}" "revert"
-
-concurrency: try-revert
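
Note: the "Revert PR" step above builds four different trymerge.py invocations depending on whether COMMENT_ID and REASON are set. The same branching can be expressed as an argument-list builder; this is a sketch for readability, not a helper that exists in the repo:

    def build_revert_cmd(pr_num: str, comment_id: str = "", reason: str = ""):
        # Equivalent of the nested `if [ -n ... ]` blocks: optional flags are
        # appended only when their environment variable is non-empty.
        cmd = ["python3", ".github/scripts/trymerge.py", "--revert"]
        if comment_id:
            cmd += ["--comment-id", comment_id]
        if reason:
            cmd += ["--reason", reason]
        return cmd + [pr_num]

    assert build_revert_cmd("12345", comment_id="99", reason="broke trunk") == [
        "python3", ".github/scripts/trymerge.py", "--revert",
        "--comment-id", "99", "--reason", "broke trunk", "12345",
    ]
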
diff --git a/.github/workflows/rocm.yml b/.github/workflows/rocm.yml
deleted file mode 100644
index c32abe592bef2..0000000000000
--- a/.github/workflows/rocm.yml
+++ /dev/null
@@ -1,57 +0,0 @@
-name: rocm
-
-on:
- push:
- branches:
- - main
- - release/*
- tags:
- - ciflow/rocm/*
- workflow_dispatch:
- schedule:
- - cron: 29 8 * * * # about 1:29am PDT
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- target-determination:
- name: before-test
- uses: ./.github/workflows/target_determination.yml
- permissions:
- id-token: write
- contents: read
-
- linux-focal-rocm6_1-py3_8-build:
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image-name: pytorch-linux-focal-rocm-n-py3
- sync-tag: rocm-build
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 6, runner: "linux.rocm.gpu.2" },
- { config: "default", shard: 2, num_shards: 6, runner: "linux.rocm.gpu.2" },
- { config: "default", shard: 3, num_shards: 6, runner: "linux.rocm.gpu.2" },
- { config: "default", shard: 4, num_shards: 6, runner: "linux.rocm.gpu.2" },
- { config: "default", shard: 5, num_shards: 6, runner: "linux.rocm.gpu.2" },
- { config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.2" },
- ]}
-
- linux-focal-rocm6_1-py3_8-test:
- permissions:
- id-token: write
- contents: read
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_rocm-test.yml
- needs:
- - linux-focal-rocm6_1-py3_8-build
- - target-determination
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
deleted file mode 100644
index cd4258c478763..0000000000000
--- a/.github/workflows/scorecards.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: ossf-scorecard
-on:
- # Only the default branch is supported.
- branch_protection_rule:
- workflow_dispatch:
- schedule:
- - cron: '32 16 * * 3'
- push:
- branches: [ "main" ]
-
-# Declare default permissions as read only.
-permissions: read-all
-
-jobs:
- analysis:
- name: Scorecards analysis
- runs-on: ubuntu-latest
- permissions:
- # Needed to upload the results to code-scanning dashboard.
- security-events: write
- # Used to receive a badge.
- id-token: write
-
- if: false && github.repository == 'pytorch/pytorch' # don't run on forks
-
- steps:
- - name: "Checkout code"
- uses: actions/checkout@v3
- with:
- persist-credentials: false
-
- - name: "Run analysis"
- uses: ossf/scorecard-action@865b4092859256271290c77adbd10a43f4779972 # tag=v2.0.3
- with:
- results_file: results.sarif
- results_format: sarif
-
- # Publish the results for public repositories to enable scorecard badges. For more details, see
- # https://github.com/ossf/scorecard-action#publishing-results.
- publish_results: true
-
- # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
- # format to the repository Actions tab.
- - name: "Upload artifact"
- uses: actions/upload-artifact@v3
- with:
- name: SARIF file
- path: results.sarif
- retention-days: 5
-
- # Upload the results to GitHub's code scanning dashboard.
- - name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5 # tag=v1.0.26
- with:
- sarif_file: results.sarif
diff --git a/.github/workflows/slow.yml b/.github/workflows/slow.yml
deleted file mode 100644
index 31db7af8fc550..0000000000000
--- a/.github/workflows/slow.yml
+++ /dev/null
@@ -1,163 +0,0 @@
-# This workflow is dedicated to host slow jobs that are run only periodically because
-# they are too slow to run in every commit. The list of slow tests can be found in
-# https://github.com/pytorch/test-infra/blob/generated-stats/stats/slow-tests.json
-name: slow
-
-on:
- schedule:
- - cron: 45 0,4,8,12,16,20 * * *
- - cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests
- push:
- tags:
- - ciflow/slow/*
- branches:
- - release/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- llm-td:
- name: before-test
- uses: ./.github/workflows/llm_td_retrieval.yml
- permissions:
- id-token: write
- contents: read
-
- target-determination:
- name: before-test
- uses: ./.github/workflows/target_determination.yml
- needs: llm-td
- permissions:
- id-token: write
- contents: read
-
- linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build:
- name: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- cuda-arch-list: 8.6
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 5, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 6, num_shards: 6, runner: "linux.g5.4xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-test:
- name: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build
- - target-determination
- with:
- build-environment: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
- docker-image: ${{ needs.linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build.outputs.test-matrix }}
- timeout-minutes: 300
-
- linux-focal-cuda12_1-py3_10-gcc9-sm86-build:
- name: linux-focal-cuda12.1-py3.10-gcc9-sm86
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- cuda-arch-list: 8.6
- test-matrix: |
- { include: [
- { config: "slow", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "slow", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-cuda12_1-py3_10-gcc9-sm86-test:
- name: linux-focal-cuda12.1-py3.10-gcc9-sm86
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-cuda12_1-py3_10-gcc9-sm86-build
- - target-determination
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-sm86-build.outputs.test-matrix }}
-
- linux-focal-py3_8-clang10-build:
- name: linux-focal-py3.8-clang10
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-py3.8-clang10
- docker-image-name: pytorch-linux-focal-py3.8-clang10
- test-matrix: |
- { include: [
- { config: "slow", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- ]}
-
- linux-focal-py3_8-clang10-test:
- name: linux-focal-py3.8-clang10
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-py3_8-clang10-build
- - target-determination
- with:
- build-environment: linux-focal-py3.8-clang10
- docker-image: ${{ needs.linux-focal-py3_8-clang10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_8-clang10-build.outputs.test-matrix }}
-
- linux-focal-rocm6_1-py3_8-build:
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image-name: pytorch-linux-focal-rocm-n-py3
- test-matrix: |
- { include: [
- { config: "slow", shard: 1, num_shards: 1, runner: "linux.rocm.gpu" },
- ]}
-
- linux-focal-rocm6_1-py3_8-test:
- permissions:
- id-token: write
- contents: read
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_rocm-test.yml
- needs:
- - linux-focal-rocm6_1-py3_8-build
- - target-determination
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
-
- linux-jammy-py3_10-clang15-asan-build:
- name: linux-jammy-py3.10-clang15-asan
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-jammy-py3.10-clang15-asan
- docker-image-name: pytorch-linux-jammy-py3-clang15-asan
- test-matrix: |
- { include: [
- { config: "slow", shard: 1, num_shards: 3, runner: "linux.4xlarge" },
- { config: "slow", shard: 2, num_shards: 3, runner: "linux.4xlarge" },
- { config: "slow", shard: 3, num_shards: 3, runner: "linux.4xlarge" },
- ]}
- sync-tag: asan-build
-
- linux-jammy-py3_10-clang15-asan-test:
- name: linux-jammy-py3.10-clang15-asan
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-jammy-py3_10-clang15-asan-build
- - target-determination
- with:
- build-environment: linux-jammy-py3.10-clang15-asan
- docker-image: ${{ needs.linux-jammy-py3_10-clang15-asan-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-py3_10-clang15-asan-build.outputs.test-matrix }}
- sync-tag: asan-test
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
deleted file mode 100644
index 56e349dfa1b82..0000000000000
--- a/.github/workflows/stale.yml
+++ /dev/null
@@ -1,156 +0,0 @@
-# A workflow that implements similar logic to actions/stale.
-#
-# Compared to actions/stale, it is implemented to make API requests proportional
-# to the number of stale PRs, not the total number of issues in the repo. This
-# is because PyTorch has a lot of issues/PRs, so the actions/stale runs into
-# rate limits way too quickly.
-#
-# The behavior is:
-# - If a PR is not labeled stale, after 60 days inactivity label the PR as stale and comment about it.
-# - If a PR is labeled stale, after 30 days inactivity close the PR.
-# - `high priority` and `no-stale` PRs are exempt.
-
-name: Close stale pull requests
-
-on:
- schedule:
- # Run hourly.
- - cron: 30 * * * *
- workflow_dispatch:
-
-jobs:
- stale:
- if: ${{ github.repository == 'pytorch/pytorch' }}
- runs-on: linux.large.arc
- permissions:
- contents: read
- pull-requests: write
-
- steps:
- - uses: actions/github-script@v6
- with:
- script: |
- // Do some dumb retries on requests.
- const retries = 7;
- const baseBackoff = 100;
- const sleep = timeout => new Promise(resolve => setTimeout(resolve, timeout));
- github.hook.wrap('request', async (request, options) => {
- for (let attempt = 1; attempt <= retries; attempt++) {
- try {
- return await request(options);
- } catch (err) {
- if (attempt < retries) {
- core.warning(`Request getting retried. Attempt: ${attempt}`);
- await sleep(baseBackoff * Math.pow(2, attempt));
- continue;
- }
- throw err;
- }
- }
- });
-
- const MAX_API_REQUESTS = 100;
-
- // If a PR is not labeled stale, label it stale after 60 days without updates.
- const STALE_LABEL_THRESHOLD_MS = 1000 * 60 * 60 * 24 * 60;
- // For PRs already labeled stale, close after 30 days without updates.
- const STALE_CLOSE_THRESHOLD_MS = 1000 * 60 * 60 * 24 * 30;
-
- const STALE_MESSAGE =
- "Looks like this PR hasn't been updated in a while so we're going to go ahead and mark this as `Stale`.
" +
- "Feel free to remove the `Stale` label if you feel this was a mistake.
" +
- "If you are unable to remove the `Stale` label please contact a maintainer in order to do so.
" +
- "If you want the bot to never mark this PR stale again, add the `no-stale` label.
" +
- "`Stale` pull requests will automatically be closed after 30 days of inactivity.
";
-
- let numAPIRequests = 0;
- let numProcessed = 0;
-
- async function processPull(pull) {
- core.info(`[${pull.number}] URL: ${pull.html_url}`);
- numProcessed += 1;
- const labels = pull.labels.map((label) => label.name);
-
- // Skip if certain labels are present.
- if (labels.includes("no-stale") || labels.includes("high priority")) {
- core.info(`[${pull.number}] Skipping because PR has an exempting label.`);
- return false;
- }
-
- // Check if the PR is stale, according to our configured thresholds.
- let staleThresholdMillis;
- if (labels.includes("Stale")) {
- core.info(`[${pull.number}] PR is labeled stale, checking whether we should close it.`);
- staleThresholdMillis = STALE_CLOSE_THRESHOLD_MS;
- } else {
- core.info(`[${pull.number}] Checking whether to label PR as stale.`);
- staleThresholdMillis = STALE_LABEL_THRESHOLD_MS;
- }
-
- const millisSinceLastUpdated =
- new Date().getTime() - new Date(pull.updated_at).getTime();
-
- if (millisSinceLastUpdated < staleThresholdMillis) {
- core.info(`[${pull.number}] Skipping because PR was updated recently`);
- return false;
- }
-
- // At this point, we know we should do something.
- // For PRs already labeled stale, close them.
- if (labels.includes("Stale")) {
- core.info(`[${pull.number}] Closing PR.`);
- numAPIRequests += 1;
- await github.rest.issues.update({
- owner: "pytorch",
- repo: "pytorch",
- issue_number: pull.number,
- state: "closed",
- });
- } else {
- // For PRs not labeled stale, label them stale.
- core.info(`[${pull.number}] Labeling PR as stale.`);
-
- numAPIRequests += 1;
- await github.rest.issues.createComment({
- owner: "pytorch",
- repo: "pytorch",
- issue_number: pull.number,
- body: STALE_MESSAGE,
- });
-
- numAPIRequests += 1;
- await github.rest.issues.addLabels({
- owner: "pytorch",
- repo: "pytorch",
- issue_number: pull.number,
- labels: ["Stale"],
- });
- }
- }
-
- for await (const response of github.paginate.iterator(
- github.rest.pulls.list,
- {
- owner: "pytorch",
- repo: "pytorch",
- state: "open",
- sort: "created",
- direction: "asc",
- per_page: 100,
- }
- )) {
- numAPIRequests += 1;
- const pulls = response.data;
-            // Awaiting in a loop is intentional here. We want to serialize execution so
-            // that log groups are printed correctly.
- for (const pull of pulls) {
- if (numAPIRequests > MAX_API_REQUESTS) {
- core.warning("Max API requests exceeded, exiting.");
- process.exit(0);
- }
- await core.group(`Processing PR #${pull.number}`, async () => {
- await processPull(pull);
- });
- }
- }
- core.info(`Processed ${numProcessed} PRs total.`);
diff --git a/.github/workflows/target-determination-indexer.yml b/.github/workflows/target-determination-indexer.yml
deleted file mode 100644
index 0ce1bae6a4138..0000000000000
--- a/.github/workflows/target-determination-indexer.yml
+++ /dev/null
@@ -1,144 +0,0 @@
-name: Index PyTorch Tests for Target Determination
-
-on:
- workflow_dispatch:
- schedule:
- - cron: '0 0 * * *'
-
-permissions:
- id-token: write
- contents: read
-
-jobs:
- index:
- runs-on: linux.g5.4xlarge.nvidia.gpu # 1 GPU A10G 24GB each
- environment: target-determinator-env
- steps:
- - name: Clone PyTorch
- uses: actions/checkout@v3
- with:
- path: pytorch
-
- - name: Setup Linux
- uses: ./pytorch/.github/actions/setup-linux
-
- - name: Calculate docker image
- id: calculate-docker-image
- uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
- with:
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- working-directory: pytorch
-
-      - name: Use the following to pull a public copy of the image
- id: print-ghcr-mirror
- env:
- ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- shell: bash
- run: |
- tag=${ECR_DOCKER_IMAGE##*/}
- echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
-
- - name: Pull docker image
- uses: pytorch/test-infra/.github/actions/pull-docker-image@main
- with:
- docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
-
- - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
- id: install-nvidia-driver
- uses: pytorch/test-infra/.github/actions/setup-nvidia@main
-
- - name: Clone CodeLlama
- uses: actions/checkout@v3
- with:
- repository: osalpekar/codellama
- ref: 1ec50e0cfc0fadc3b6ceb146617e2119ab26eb34
- path: codellama
-
- - name: Clone Target Determination Code
- uses: actions/checkout@v3
- with:
- repository: osalpekar/llm-target-determinator
- ref: v0.0.2
- path: llm-target-determinator
-
- - name: Configure AWS credentials
- uses: aws-actions/configure-aws-credentials@v3
- with:
- role-to-assume: arn:aws:iam::308535385114:role/gha_target_determinator_s3_read_write
- aws-region: us-east-1
-
- - name: Download checkpoint
- shell: bash
- env:
- AWS_DEFAULT_REGION: us-east-1
- run: |
-          # Do this outside of docker so I don't have to pass env vars into the container
- pip3 install awscli==1.29.40
- cd codellama
- mkdir "CodeLlama-7b-Python"
- aws s3 cp \
- "s3://target-determinator-assets/CodeLlama-7b-Python" \
- "CodeLlama-7b-Python" \
- --recursive
-
- - name: Run indexer
- shell: bash -l {0}
- env:
- DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- AWS_DEFAULT_REGION: us-east-1
- run: |
- # detached container should get cleaned up by teardown_ec2_linux
- container_name=$(docker run \
- ${GPU_FLAG:-} \
- -e MAX_JOBS="$(nproc --ignore=2)" \
- -e AWS_DEFAULT_REGION \
- --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
- --security-opt seccomp=unconfined \
- --cap-add=SYS_PTRACE \
- --tty \
- --detach \
- --user jenkins \
- -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
- -w /var/lib/jenkins/workspace \
- "${DOCKER_IMAGE}"
- )
- chmod +x pytorch/.github/scripts/td_llm_indexer.sh
- docker exec -t "${container_name}" sh -c 'pytorch/.github/scripts/td_llm_indexer.sh'
-
- - name: Upload to s3
- shell: bash -l {0}
- env:
- AWS_DEFAULT_REGION: us-east-1
- run: |
- cd llm-target-determinator/assets
-
- TIMESTAMP=$(date -Iseconds)
- ZIP_NAME="indexer-files-${TIMESTAMP}.zip"
-
- # Create a zipfile with all the generated indices
- zip -r "${ZIP_NAME}" indexer-files
-
-          # Note that because the two operations below are not atomic, there will
-          # be a window of a few seconds between them during which no index is
-          # present in the latest/ folder. To account for this, the retriever
-          # should have retry logic with backoff so that fetching the index
-          # doesn't fail.
-          # Move the old index into the archived/ folder
- aws s3 mv \
- "s3://target-determinator-assets/indexes/latest" \
- "s3://target-determinator-assets/indexes/archived" \
- --recursive
-
-          # Move the new index into the latest/ folder
- aws s3 cp \
- "${ZIP_NAME}" \
- "s3://target-determinator-assets/indexes/latest/${ZIP_NAME}"
-
- - name: Teardown Linux
- uses: pytorch/test-infra/.github/actions/teardown-linux@main
- if: always()
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
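The upload step above notes that the consumer of these indexes needs retry logic with backoff, since the archived/latest swap in S3 is not atomic; that retriever lives in the llm-target-determinator repo and is not part of this diff. Purely as an illustration, a minimal sketch of such a fetch could look like the following, assuming boto3 and a hypothetical `fetch_latest_index` helper (the bucket name mirrors the workflow above):

```python
# Illustrative only: a hypothetical retriever-side helper, not part of the
# workflows deleted in this diff. It retries with exponential backoff to
# ride out the brief window while indexes/latest is being swapped.
import time

import boto3
from botocore.exceptions import ClientError

BUCKET = "target-determinator-assets"  # same bucket the indexer workflow writes to


def fetch_latest_index(key: str, dest: str, retries: int = 5, base_backoff: float = 2.0) -> None:
    """Download s3://<BUCKET>/indexes/latest/<key> to dest, retrying with backoff."""
    s3 = boto3.client("s3")
    for attempt in range(retries):
        try:
            s3.download_file(BUCKET, f"indexes/latest/{key}", dest)
            return
        except ClientError:
            if attempt == retries - 1:
                raise
            # Back off 2s, 4s, 8s, ... to outlast the non-atomic mv/cp window.
            time.sleep(base_backoff * (2 ** attempt))
```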
diff --git a/.github/workflows/target_determination.yml b/.github/workflows/target_determination.yml
deleted file mode 100644
index cd5e758345b59..0000000000000
--- a/.github/workflows/target_determination.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-name: target-determination
-
-on:
- workflow_call:
-
-jobs:
- target-determination:
- # Don't run on forked repos
- if: github.repository_owner == 'pytorch'
- runs-on: linux.2xlarge
- steps:
- # [pytorch repo ref]
- # Use a pytorch/pytorch reference instead of a reference to the local
- # checkout because when we run this action we don't *have* a local
- # checkout. In other cases you should prefer a local checkout.
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
-
- - name: Setup Linux
- uses: ./.github/actions/setup-linux
-
- - name: Get workflow job id
- id: get-job-id
- uses: ./.github/actions/get-workflow-job-id
- if: always()
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Download pytest cache
- uses: ./.github/actions/pytest-cache-download
- continue-on-error: true
- with:
- cache_dir: .pytest_cache
- job_identifier: ${{ github.workflow }}
-
- - name: Download LLM Artifacts from S3
- uses: seemethere/download-artifact-s3@v4
- continue-on-error: true
- with:
- name: llm_results
- path: .additional_ci_files/llm_results
-
- - name: Do TD
- id: td
- continue-on-error: true
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- GITHUB_REPOSITORY: ${{ github.repository }}
- GITHUB_WORKFLOW: ${{ github.workflow }}
- GITHUB_JOB: ${{ github.job }}
- GITHUB_RUN_ID: ${{ github.run_id }}
- GITHUB_RUN_NUMBER: ${{ github.run_number }}
- GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
- GITHUB_REF: ${{ github.ref }}
- JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
- JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- run: |
- unzip -o .additional_ci_files/llm_results/mappings.zip -d .additional_ci_files/llm_results || true
- python3 -m pip install boto3==1.19.12
- python3 tools/testing/do_target_determination_for_s3.py
-
- - name: Upload TD results to s3
- uses: seemethere/upload-artifact-s3@v5
- if: steps.td.outcome == 'success'
- with:
- name: td_results
- retention-days: 14
- if-no-files-found: error
- path: td_results.json
-
- - name: Store TD results on GHA
- uses: actions/upload-artifact@v3
- if: steps.td.outcome == 'success'
- with:
- name: td_results.json
- retention-days: 14
- if-no-files-found: error
- path: td_results.json
diff --git a/.github/workflows/torchbench.yml b/.github/workflows/torchbench.yml
deleted file mode 100644
index 73befe34c0782..0000000000000
--- a/.github/workflows/torchbench.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: torchbench
-
-on:
- push:
- tags:
- - ciflow/torchbench/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-jobs:
- linux-focal-cuda12_1-py3_10-gcc9-torchbench-build-gcp:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.0'
- test-matrix: |
- { include: [
- { config: "torchbench_gcp_smoketest", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
- ]}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-torchbench-test-gcp:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-torchbench-build-gcp
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-torchbench-build-gcp.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-torchbench-build-gcp.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
deleted file mode 100644
index 3414f23f690f8..0000000000000
--- a/.github/workflows/trunk.yml
+++ /dev/null
@@ -1,211 +0,0 @@
-name: trunk
-
-on:
- push:
- branches:
- - main
- - release/*
- - landchecks/*
- tags:
- - ciflow/trunk/*
- workflow_dispatch:
- schedule:
- - cron: 29 8 * * * # about 1:29am PDT
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
- llm-td:
- name: before-test
- uses: ./.github/workflows/llm_td_retrieval.yml
- permissions:
- id-token: write
- contents: read
-
- target-determination:
- name: before-test
- uses: ./.github/workflows/target_determination.yml
- needs: llm-td
- permissions:
- id-token: write
- contents: read
-
- linux-focal-cuda12_1-py3_10-gcc9-build:
- name: linux-focal-cuda12.1-py3.10-gcc9
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- test-matrix: |
- { include: [
- { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
- ]}
-
- linux-focal-cuda12_1-py3_10-gcc9-test:
- name: linux-focal-cuda12.1-py3.10-gcc9
- uses: ./.github/workflows/_linux-test.yml
- needs:
- - linux-focal-cuda12_1-py3_10-gcc9-build
- - target-determination
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-build.outputs.test-matrix }}
-
- libtorch-linux-focal-cuda12_1-py3_7-gcc9-debug-build:
- name: libtorch-linux-focal-cuda12.1-py3.7-gcc9-debug
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: libtorch-linux-focal-cuda12.1-py3.7-gcc9
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- build-generates-artifacts: false
- runner: linux.4xlarge
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
-  # The no-ops build tests USE_PER_OPERATOR_HEADERS=0, where ATen/ops is not generated
- linux-focal-cuda12_1-py3_10-gcc9-no-ops-build:
- name: linux-focal-cuda12.1-py3.10-gcc9-no-ops
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-no-ops
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- pytorch-linux-focal-py3-clang9-android-ndk-r21e-build:
- name: pytorch-linux-focal-py3-clang9-android-ndk-r21e-build
- uses: ./.github/workflows/_android-full-build-test.yml
- with:
- build-environment: pytorch-linux-focal-py3-clang9-android-ndk-r21e-build
- docker-image-name: pytorch-linux-focal-py3-clang9-android-ndk-r21e
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- ]}
-
- macos-13-py3-arm64-build:
- name: macos-13-py3-arm64
- uses: ./.github/workflows/_mac-build.yml
- with:
- sync-tag: macos-py3-arm64-build
- build-environment: macos-13-py3-arm64
- runner-type: macos-m1-stable
- build-generates-artifacts: true
- # To match the one pre-installed in the m1 runners
- python-version: 3.9.12
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "macos-m1-stable" },
- { config: "default", shard: 2, num_shards: 3, runner: "macos-m1-stable" },
- { config: "default", shard: 3, num_shards: 3, runner: "macos-m1-stable" },
- ]}
-
- macos-py3-arm64-mps-test:
- name: macos-py3-arm64-mps
- uses: ./.github/workflows/_mac-test-mps.yml
- needs: macos-13-py3-arm64-build
- if: needs.macos-13-py3-arm64-build.outputs.build-outcome == 'success'
- with:
- sync-tag: macos-py3-arm64-mps-test
- build-environment: macos-13-py3-arm64
- # Same as the build job
- python-version: 3.9.12
- test-matrix: |
- { include: [
- { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-stable" },
- { config: "mps", shard: 1, num_shards: 1, runner: "macos-m1-14" },
-
- ]}
-
- macos-13-py3-arm64-test:
- name: macos-13-py3-arm64
- uses: ./.github/workflows/_mac-test.yml
- needs:
- - macos-13-py3-arm64-build
- - target-determination
- with:
- build-environment: macos-13-py3-arm64
- # Same as the build job
- python-version: 3.9.12
- test-matrix: ${{ needs.macos-13-py3-arm64-build.outputs.test-matrix }}
-
- win-vs2019-cpu-py3-build:
- name: win-vs2019-cpu-py3
- uses: ./.github/workflows/_win-build.yml
- with:
- build-environment: win-vs2019-cpu-py3
- cuda-version: cpu
- sync-tag: win-cpu-build
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- { config: "default", shard: 2, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- { config: "default", shard: 3, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- ]}
-
- win-vs2019-cpu-py3-test:
- name: win-vs2019-cpu-py3
- uses: ./.github/workflows/_win-test.yml
- needs:
- - win-vs2019-cpu-py3-build
- - target-determination
- with:
- build-environment: win-vs2019-cpu-py3
- cuda-version: cpu
- test-matrix: ${{ needs.win-vs2019-cpu-py3-build.outputs.test-matrix }}
-
- win-vs2019-cuda11_8-py3-build:
- name: win-vs2019-cuda11.8-py3
- uses: ./.github/workflows/_win-build.yml
- with:
- build-environment: win-vs2019-cuda11.8-py3
- cuda-version: "11.8"
- sync-tag: win-cuda-build
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 5, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 6, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
- { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge.nonephemeral" },
- ]}
-
- linux-focal-rocm6_1-py3_8-build:
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_linux-build-label.yml
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image-name: pytorch-linux-focal-rocm-n-py3
- sync-tag: rocm-build
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.rocm.gpu" },
- ]}
-
- linux-focal-rocm6_1-py3_8-test:
- permissions:
- id-token: write
- contents: read
- name: linux-focal-rocm6.1-py3.8
- uses: ./.github/workflows/_rocm-test.yml
- needs:
- - linux-focal-rocm6_1-py3_8-build
- - target-determination
- with:
- build-environment: linux-focal-rocm6.1-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
- tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor"
diff --git a/.github/workflows/trymerge.yml b/.github/workflows/trymerge.yml
deleted file mode 100644
index db0b80b32fa4c..0000000000000
--- a/.github/workflows/trymerge.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-name: Validate and merge PR
-
-on:
- repository_dispatch:
- types: [try-merge]
-
-jobs:
- do_merge:
- name: try_merge_pr_${{ github.event.client_payload.pr_num }}
- runs-on: linux.20_04.4x
- environment: mergebot
- env:
- GH_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- steps:
- - name: Checkout repo
- id: checkout
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
- token: ${{ secrets.MERGEBOT_TOKEN }}
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- check-latest: false
- cache: pip
- architecture: x64
- - run: pip install pyyaml==6.0 rockset==1.0.3
-
- - name: Setup committer id
- run: |
- git config --global user.email "pytorchmergebot@users.noreply.github.com"
- git config --global user.name "PyTorch MergeBot"
- - name: Merge PR
- shell: bash
- env:
- GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
- PR_NUM: ${{ github.event.client_payload.pr_num }}
-          FORCE: ${{ github.event.client_payload.force }}
- COMMENT_ID: ${{ github.event.client_payload.comment_id }}
- REBASE: ${{ github.event.client_payload.rebase }}
- IGNORE_CURRENT: ${{ github.event.client_payload.ignore_current }}
- ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
- DRCI_BOT_KEY: ${{ secrets.DRCI_BOT_KEY }}
- run: |
- set -x
- if [ -n "${REBASE}" ]; then
- # attempt to rebase, if it fails then comment on the PR that it failed
- if ! python3 .github/scripts/tryrebase.py "${PR_NUM}" --branch "${REBASE}"; then
- python3 .github/scripts/comment_on_pr.py "${PR_NUM}" "merge"
- exit 0
- fi
- git checkout main
- git fetch -p
-            # give GitHub some time between the push and starting workflows so that GitHub's messages
-            # on the PR appear in chronological order (timing issues can shuffle them around)
- sleep 60
- fi
- if [ -n "${FORCE}" ]; then
- if [ -n "${COMMENT_ID}" ]; then
- python3 .github/scripts/trymerge.py --force --comment-id "${COMMENT_ID}" "${PR_NUM}"
- else
- python3 .github/scripts/trymerge.py --force "${PR_NUM}"
- fi
- elif [ -n "${IGNORE_CURRENT}" ]; then
- if [ -n "${COMMENT_ID}" ]; then
- python3 .github/scripts/trymerge.py --ignore-current --comment-id "${COMMENT_ID}" "${PR_NUM}"
- else
- python3 .github/scripts/trymerge.py --ignore-current "${PR_NUM}"
- fi
- elif [ -n "${COMMENT_ID}" ]; then
- python3 .github/scripts/trymerge.py --comment-id "${COMMENT_ID}" "${PR_NUM}"
- else
- python3 .github/scripts/trymerge.py "${PR_NUM}"
- fi
- - name: Comment on Canceled
- if: ${{ cancelled() && steps.checkout.outcome == 'success' }}
- continue-on-error: true
- env:
- GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
- PR_NUM: ${{ github.event.client_payload.pr_num }}
- run: |
- set -x
- python3 .github/scripts/comment_on_pr.py "${PR_NUM}" "merge"
-
-# We want newer merge commands to supersede old ones
-concurrency:
- group: try-merge-${{ github.event.client_payload.pr_num }}
- cancel-in-progress: true
diff --git a/.github/workflows/tryrebase.yml b/.github/workflows/tryrebase.yml
deleted file mode 100644
index e69d5f9fdd319..0000000000000
--- a/.github/workflows/tryrebase.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: Rebase PR
-
-on:
- repository_dispatch:
- types: [try-rebase]
-
-jobs:
- do_rebase:
- runs-on: ubuntu-20.04
- environment: mergebot
- env:
- GH_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
- steps:
- - name: Checkout repo
- id: checkout
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
- token: ${{ secrets.MERGEBOT_TOKEN }}
-
- - name: Setup Python
- uses: actions/setup-python@v4
- with:
- python-version: '3.8'
- architecture: x64
- check-latest: false
- cache: pip
- - run: pip install pyyaml==6.0
-
- - name: Setup committer id
- run: |
- git config --global user.email "pytorchmergebot@users.noreply.github.com"
- git config --global user.name "PyTorch MergeBot"
-
- - name: Rebase
- env:
- GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
- PR_NUM: ${{ github.event.client_payload.pr_num }}
- BRANCH: ${{ github.event.client_payload.branch }}
- run: |
- set -x
- if [ -n "${BRANCH}" ]; then
- python3 .github/scripts/tryrebase.py "${PR_NUM}" --branch "${BRANCH}"
- else
- python3 .github/scripts/tryrebase.py "${PR_NUM}"
- fi
- - name: Comment on Canceled
- if: ${{ cancelled() && steps.checkout.outcome == 'success' }}
- continue-on-error: true
- env:
- GITHUB_TOKEN: ${{ secrets.MERGEBOT_TOKEN }}
- PR_NUM: ${{ github.event.client_payload.pr_num }}
- run: |
- set -ex
- python3 .github/scripts/comment_on_pr.py "${PR_NUM}" "rebase"
diff --git a/.github/workflows/unstable-periodic.yml b/.github/workflows/unstable-periodic.yml
deleted file mode 100644
index 9a41bbd44f268..0000000000000
--- a/.github/workflows/unstable-periodic.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-name: unstable-periodic
-
-on:
- schedule:
- - cron: 45 0,4,8,12,16,20 * * *
- - cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests
- push:
- tags:
- - ciflow/unstable/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
-  # There must be at least one job here to satisfy GitHub Actions workflow syntax
- introduction:
- if: github.repository_owner == 'pytorch'
- runs-on: ubuntu-latest
- continue-on-error: true
- steps:
- - name: Introduce PyTorch unstable (periodic) workflow
- run: |
- echo "PyTorch unstable workflow is used to host experimental or flaky jobs"
-          echo " that need to be run periodically, but don't impact trunk as part"
- echo " of the stable periodic workflows."
- echo
- echo "In addition, a new label called ciflow/unstable can be attached to the"
- echo " PR to trigger this workflow. That can be done either manually or"
- echo " automatically using PyTorch auto-label bot."
- echo
- echo "Once the jobs are deemed stable enough (% red signal < 5% and TTS < 3h),"
- echo " they can graduate and move back to periodic."
diff --git a/.github/workflows/unstable.yml b/.github/workflows/unstable.yml
deleted file mode 100644
index ac1d49d1cce57..0000000000000
--- a/.github/workflows/unstable.yml
+++ /dev/null
@@ -1,205 +0,0 @@
-name: unstable
-
-on:
- push:
- branches:
- - main
- tags:
- - ciflow/unstable/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
-
-permissions: read-all
-
-jobs:
-  # There must be at least one job here to satisfy GitHub Actions workflow syntax
- introduction:
- runs-on: ubuntu-latest
- continue-on-error: true
- steps:
- - name: Introduce PyTorch unstable workflow
- run: |
- echo "PyTorch unstable workflow is used to host experimental or flaky jobs"
-          echo " that need to be run for every commit, but don't block PR merging"
- echo " as part of the stable pull or trunk workflows."
- echo
- echo "In addition, a new label called ciflow/unstable can be attached to the"
- echo " PR to trigger this workflow. That can be done either manually or"
- echo " automatically using PyTorch auto-label bot."
- echo
- echo "Once the jobs are deemed stable enough (% red signal < 5% and TTS < 3h),"
- echo " they can graduate and move back to pull or trunk."
-
- #
- # Experimental ARC jobs
- #
- llm-td:
- name: before-test
- uses: ./.github/workflows/llm_td_retrieval.yml
- permissions:
- id-token: write
- contents: read
-
- target-determination:
- name: before-test
- uses: ./.github/workflows/target_determination.yml
- needs: llm-td
- permissions:
- id-token: write
- contents: read
-
- linux-jammy-py3_8-gcc11-build:
- name: linux-jammy-py3.8-gcc11
- uses: ./.github/workflows/_linux-build-rg.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "default", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "default", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "docs_test", shard: 1, num_shards: 1, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "jit_legacy", shard: 1, num_shards: 1, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "backwards_compat", shard: 1, num_shards: 1, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "distributed", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "distributed", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- ]}
-
- linux-jammy-py3_8-gcc11-test:
- name: linux-jammy-py3.8-gcc11
- uses: ./.github/workflows/_linux-test-rg.yml
- needs:
- - linux-jammy-py3_8-gcc11-build
- - target-determination
- with:
- build-environment: linux-jammy-py3.8-gcc11
- docker-image: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-py3_8-gcc11-build.outputs.test-matrix }}
-
- linux-jammy-py3_8-gcc11-no-ops:
- name: linux-jammy-py3.8-gcc11-no-ops
- uses: ./.github/workflows/_linux-build-rg.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11-no-ops
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- linux-jammy-py3_8-gcc11-pch:
- name: linux-jammy-py3.8-gcc11-pch
- uses: ./.github/workflows/_linux-build-rg.yml
- with:
- build-environment: linux-jammy-py3.8-gcc11-pch
- docker-image-name: pytorch-linux-jammy-py3.8-gcc11
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 1 },
- ]}
-
- linux-focal-py3_8-clang10-onnx-build:
- name: linux-focal-py3.8-clang10-onnx
- uses: ./.github/workflows/_linux-build-rg.yml
- with:
- build-environment: linux-focal-py3.8-clang10-onnx
- docker-image-name: pytorch-linux-focal-py3-clang10-onnx
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "default", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- ]}
-
- linux-focal-py3_8-clang10-onnx-test:
- name: linux-focal-py3.8-clang10-onnx
- uses: ./.github/workflows/_linux-test-rg.yml
- needs:
- - linux-focal-py3_8-clang10-onnx-build
- - target-determination
- with:
- build-environment: linux-focal-py3.8-clang10-onnx
- docker-image: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_8-clang10-onnx-build.outputs.test-matrix }}
-
- linux-jammy-py3_10-clang15-asan-build:
- name: linux-jammy-py3.10-clang15-asan
- uses: ./.github/workflows/_linux-build-rg.yml
- with:
- build-environment: linux-jammy-py3.10-clang15-asan
- docker-image-name: pytorch-linux-jammy-py3-clang15-asan
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 2, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 3, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 4, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 5, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 6, num_shards: 6, runner: "linux.4xlarge" },
- ]}
- sync-tag: asan-build-arc
-
- linux-focal-py3_8-clang10-build:
- name: linux-focal-py3.8-clang10
- uses: ./.github/workflows/_linux-build-rg.yml
- with:
- build-environment: linux-focal-py3.8-clang10
- docker-image-name: pytorch-linux-focal-py3.8-clang10
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "default", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "default", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "crossref", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "crossref", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- ]}
-
- linux-focal-py3_8-clang10-test:
- name: linux-focal-py3.8-clang10
- uses: ./.github/workflows/_linux-test-rg.yml
- needs:
- - linux-focal-py3_8-clang10-build
- - target-determination
- with:
- build-environment: linux-focal-py3.8-clang10
- docker-image: ${{ needs.linux-focal-py3_8-clang10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_8-clang10-build.outputs.test-matrix }}
-
- linux-focal-py3_11-clang10-build:
- name: linux-focal-py3.11-clang10
- uses: ./.github/workflows/_linux-build-rg.yml
- with:
- build-environment: linux-focal-py3.11-clang10
- docker-image-name: pytorch-linux-focal-py3.11-clang10
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "default", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "default", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "crossref", shard: 1, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "crossref", shard: 2, num_shards: 2, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "arc-lf-linux.2xlarge.avx512" },
- ]}
-
- linux-focal-py3_11-clang10-test:
- name: linux-focal-py3.11-clang10
- uses: ./.github/workflows/_linux-test-rg.yml
- needs:
- - linux-focal-py3_11-clang10-build
- - target-determination
- with:
- build-environment: linux-focal-py3.11-clang10
- docker-image: ${{ needs.linux-focal-py3_11-clang10-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-py3_11-clang10-build.outputs.test-matrix }}
-
- #
- # End of Experimental ARC jobs
- #
diff --git a/.github/workflows/update-viablestrict.yml b/.github/workflows/update-viablestrict.yml
deleted file mode 100644
index 94a712b377484..0000000000000
--- a/.github/workflows/update-viablestrict.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: Update viable/strict
-
-on:
- schedule:
- - cron: 17,47 * * * *
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}
- cancel-in-progress: false
-
-jobs:
- do_update_viablestrict:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: ubuntu-20.04
- environment: ${{ (github.event_name == 'schedule') && 'mergebot' || '' }}
- steps:
- - name: Update viable/strict
- uses: pytorch/test-infra/.github/actions/update-viablestrict@main
- with:
- repository: pytorch/pytorch
- stable-branch: viable/strict
- requires: '[\"pull\", \"trunk\", \"lint\", \"linux-binary\"]'
- secret-bot-token: ${{ secrets.MERGEBOT_TOKEN }}
- rockset-api-key: ${{ secrets.ROCKSET_API_KEY }}
diff --git a/.github/workflows/update_pytorch_labels.yml b/.github/workflows/update_pytorch_labels.yml
deleted file mode 100644
index db09474fb2120..0000000000000
--- a/.github/workflows/update_pytorch_labels.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-name: Update PyTorch Labels in S3
-
-on:
- label:
- workflow_dispatch:
-
-concurrency:
- group: 1
- cancel-in-progress: true
-
-jobs:
- update-labels-in-S3:
- runs-on: ubuntu-22.04
- if: ${{ github.repository == 'pytorch/pytorch' }}
- permissions:
- id-token: write
- contents: read
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- fetch-depth: 1
- submodules: false
- - name: configure aws credentials
- id: aws_creds
- uses: aws-actions/configure-aws-credentials@v4
- with:
- role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_update_pytorch_labels
- aws-region: us-east-1
- - name: Update PyTorch labels list in S3
- run: |
- python3 -m pip install boto3==1.19.12
- .github/scripts/export_pytorch_labels.py pytorch pytorch
diff --git a/.github/workflows/upload-alerts.yml b/.github/workflows/upload-alerts.yml
deleted file mode 100644
index 77c82f1f04cd9..0000000000000
--- a/.github/workflows/upload-alerts.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-# upload alerts every 10 minutes
-
-name: Upload Alerts to AWS/Rockset
-
-on:
- schedule:
- - cron: '*/10 * * * *'
- pull_request:
- paths:
- - 'tools/alerts/create_alerts.py'
- - '.github/workflows/upload-alerts.yml'
-
-jobs:
- upload-alerts:
- if: ${{ github.repository_owner == 'pytorch' }}
- runs-on: ubuntu-22.04
- environment: upload-stats
- steps:
- - name: Checkout repo
- uses: actions/checkout@v3
- with:
- fetch-depth: 1
-
- - uses: actions/setup-python@v4
- with:
- python-version: '3.11'
- cache: pip
-
- - name: Install Python Packages
- run: |
- pip3 install rockset==1.0.3 boto3==1.19.12 requests==2.27.1
-
- - name: Create alerts
- run: |
- output=$(PYTHONPATH=$PYTHONPATH:$(pwd) python3 "tools/alerts/create_alerts.py")
-          echo "Uploading the following alerts"
- echo "$output"
- echo "script-output=$output" >> "$GITHUB_OUTPUT"
- id: alert_creation_step
-
- - name: Upload alerts
- env:
- ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- uses: pytorch/test-infra/.github/actions/upload-alerts@main
- with:
- alerts: '${{ steps.alert_creation_step.outputs.script-output }}'
- organization: "pytorch"
- repo: "pytorch"
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
- cancel-in-progress: true
diff --git a/.github/workflows/upload-test-stats.yml b/.github/workflows/upload-test-stats.yml
deleted file mode 100644
index f71d86eb5e59f..0000000000000
--- a/.github/workflows/upload-test-stats.yml
+++ /dev/null
@@ -1,120 +0,0 @@
-name: Upload test stats
-
-on:
- workflow_run:
- workflows: [pull, trunk, periodic, inductor, unstable, slow, unstable-periodic, inductor-periodic, rocm, inductor-micro-benchmark]
- types:
- - completed
-
-jobs:
- # the conclusion field in the github context is sometimes null
- # solution adapted from https://github.com/community/community/discussions/21090#discussioncomment-3226271
- get_workflow_conclusion:
- if: github.repository_owner == 'pytorch'
- runs-on: ubuntu-latest
- outputs:
- conclusion: ${{ fromJson(steps.get_conclusion.outputs.data).conclusion }}
- steps:
- - name: Get workflow run conclusion
- uses: octokit/request-action@v2.1.0
- id: get_conclusion
- with:
- route: GET /repos/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}/attempts/${{ github.event.workflow_run.run_attempt }}
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- upload-test-stats:
- needs: get_workflow_conclusion
- if:
- github.repository_owner == 'pytorch' &&
- (github.event.workflow_run.conclusion == 'success' || github.event.workflow_run.conclusion == 'failure' ||
- needs.get_workflow_conclusion.outputs.conclusion == 'success' || needs.get_workflow_conclusion.outputs.conclusion == 'failure')
- runs-on: ubuntu-22.04
- environment: upload-stats
- name: Upload test stats for ${{ github.event.workflow_run.id }}, attempt ${{ github.event.workflow_run.run_attempt }}
- steps:
- - name: Print workflow information
- env:
- TRIGGERING_WORKFLOW: ${{ toJSON(github.event.workflow_run) }}
- run: echo "${TRIGGERING_WORKFLOW}"
-
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-
- - uses: actions/setup-python@v4
- with:
- python-version: '3.11'
- cache: pip
-
- - run: |
- pip3 install requests==2.26 rockset==1.0.3 boto3==1.19.12
-
- - name: Upload test artifacts
- id: upload-s3
- env:
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- WORKFLOW_ARTIFACTS_URL: ${{ github.event.workflow_run.artifacts_url }}
- WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
- WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
- REPO_FULLNAME: ${{ github.event.workflow_run.repository.full_name }}
- run: |
- echo "${WORKFLOW_ARTIFACTS_URL}"
-
- # Note that in the case of Linux and Windows, their artifacts have already been uploaded to S3, so there simply won't be
- # anything on GitHub to upload. The command should return right away
- python3 -m tools.stats.upload_artifacts --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}"
-
- - name: Upload test stats
- env:
- ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
- WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
- WORKFLOW_URL: ${{ github.event.workflow_run.html_url }}
- HEAD_REPOSITORY: ${{ github.event.workflow_run.head_repository.full_name }}
- HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
- run: |
- echo "${WORKFLOW_URL}"
- python3 -m tools.stats.upload_test_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --head-branch "${HEAD_BRANCH}" --head-repository "${HEAD_REPOSITORY}"
- python3 -m tools.stats.upload_sccache_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}"
-
- - name: Analyze disabled tests rerun
- env:
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- WORKFLOW_ARTIFACTS_URL: ${{ github.event.workflow_run.artifacts_url }}
- WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
- WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
- REPO_FULLNAME: ${{ github.event.workflow_run.repository.full_name }}
- run: |
-          # Analyze the results from the disabled tests rerun and upload them to S3
- python3 -m tools.stats.check_disabled_tests --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}"
-
- - name: Upload gpt-fast benchmark results to Rockset
- if: steps.upload-s3.outcome && steps.upload-s3.outcome == 'success' && github.event.workflow_run.name == 'inductor-micro-benchmark'
- env:
- ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
- WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
- REPO_FULLNAME: ${{ github.event.workflow_run.repository.full_name }}
- HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
- run: |
- python3 -m tools.stats.upload_dynamo_perf_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}" --head-branch "${HEAD_BRANCH}" --rockset-collection oss_ci_benchmark --rockset-workspace benchmarks --match-filename "^gpt_fast_benchmark"
-
- check-api-rate:
- if: ${{ always() && github.repository_owner == 'pytorch' }}
- runs-on: ubuntu-latest
- continue-on-error: true
- steps:
- - name: Get our GITHUB_TOKEN API limit usage
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- curl -H "Accept: application/vnd.github.v3+json" -H "Authorization: token $GITHUB_TOKEN" https://api.github.com/rate_limit
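For context on the get_workflow_conclusion job above: it re-queries the run-attempt endpoint because github.event.workflow_run.conclusion is sometimes null. A minimal sketch of the same lookup outside of Actions, assuming the requests library and placeholder repo/run/token values, might look like this:

```python
# Illustrative only: the same lookup the get_workflow_conclusion job performs
# via octokit/request-action, expressed with the requests library.
# GITHUB_TOKEN, repo, run_id, and attempt are placeholders.
import os
from typing import Optional

import requests


def get_run_attempt_conclusion(repo: str, run_id: int, attempt: int) -> Optional[str]:
    """Return the `conclusion` of a specific workflow-run attempt, which is reliably populated."""
    resp = requests.get(
        f"https://api.github.com/repos/{repo}/actions/runs/{run_id}/attempts/{attempt}",
        headers={
            "Accept": "application/vnd.github.v3+json",
            "Authorization": f"token {os.environ['GITHUB_TOKEN']}",
        },
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json().get("conclusion")
```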
diff --git a/.github/workflows/upload-torch-dynamo-perf-stats.yml b/.github/workflows/upload-torch-dynamo-perf-stats.yml
deleted file mode 100644
index 546d4d945761b..0000000000000
--- a/.github/workflows/upload-torch-dynamo-perf-stats.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-name: Upload torch dynamo performance stats
-
-on:
- workflow_run:
- workflows: [inductor-A100-perf-nightly]
- types:
- - completed
-
-jobs:
- get-conclusion:
- runs-on: ubuntu-latest
- outputs:
- conclusion: ${{ fromJson(steps.get-conclusion.outputs.data).conclusion }}
- steps:
- - name: Get workflow run conclusion
- uses: octokit/request-action@v2.1.0
- id: get-conclusion
- with:
- route: GET /repos/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}/attempts/${{ github.event.workflow_run.run_attempt }}
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
- upload-perf-stats:
- needs: get-conclusion
- if: github.event.workflow_run.conclusion == 'success' || needs.get-conclusion.outputs.conclusion == 'success' ||
- github.event.workflow_run.conclusion == 'failure' || needs.get-conclusion.outputs.conclusion == 'failure'
- runs-on: ubuntu-22.04
- environment: upload-stats
- name: Upload dynamo performance stats for ${{ github.event.workflow_run.id }}, attempt ${{ github.event.workflow_run.run_attempt }}
- steps:
- - name: Checkout PyTorch
- uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
- with:
- submodules: false
- fetch-depth: 1
-
- - uses: actions/setup-python@v4
- with:
- python-version: '3.11'
- cache: pip
-
- - run: |
- pip3 install requests==2.26 rockset==1.0.3 boto3==1.19.12
-
- - name: Upload torch dynamo performance stats to S3
- id: upload-s3
- env:
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- WORKFLOW_ARTIFACTS_URL: ${{ github.event.workflow_run.artifacts_url }}
- WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
- WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
- REPO_FULLNAME: ${{ github.event.workflow_run.repository.full_name }}
- run: |
- # Upload perf test reports from GHA to S3, which can now be downloaded
- # on HUD
- python3 -m tools.stats.upload_artifacts --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}"
-
- - name: Upload torch dynamo performance stats to Rockset
- if: steps.upload-s3.outcome && steps.upload-s3.outcome == 'success'
- env:
- ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
- WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
- REPO_FULLNAME: ${{ github.event.workflow_run.repository.full_name }}
- HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
- run: |
- python3 -m tools.stats.upload_dynamo_perf_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}" --head-branch "${HEAD_BRANCH}" --rockset-collection torch_dynamo_perf_stats --rockset-workspace inductor --match-filename "^inductor_"
diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml
deleted file mode 100644
index f097b146c21f8..0000000000000
--- a/.github/workflows/weekly.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-name: weekly
-
-on:
- schedule:
-    # Mondays at 7:37am UTC = 11:37pm PST (12:37am PDT)
-    # Choose a random time near midnight Pacific because the run may be delayed under high load
- # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#schedule
- - cron: 37 7 * * 1
- workflow_dispatch:
-
-permissions: read-all
-
-jobs:
- update-commit-hash:
- runs-on: ubuntu-latest
- environment: update-commit-hash
- steps:
- - name: Checkout repo
- uses: actions/checkout@v3
- with:
- fetch-depth: 0
- - name: update-xla-commit-hash
- continue-on-error: true
- uses: pytorch/test-infra/.github/actions/update-commit-hash@main
- with:
- repo-name: xla
- branch: master
- pin-folder: .github/ci_commit_pins
- test-infra-ref: main
- updatebot-token: ${{ secrets.UPDATEBOT_TOKEN }}
- pytorchbot-token: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
- - name: update-triton-commit-hash
- uses: pytorch/test-infra/.github/actions/update-commit-hash@main
- with:
- repo-owner: openai
- repo-name: triton
- branch: main
- pin-folder: .ci/docker/ci_commit_pins
- test-infra-ref: main
- updatebot-token: ${{ secrets.UPDATEBOT_TOKEN }}
- pytorchbot-token: ${{ secrets.GH_PYTORCHBOT_TOKEN }}
diff --git a/.github/workflows/xpu.yml b/.github/workflows/xpu.yml
deleted file mode 100644
index b48a7c01cc3be..0000000000000
--- a/.github/workflows/xpu.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: xpu
-
-on:
- push:
- tags:
- - ciflow/xpu/*
- workflow_dispatch:
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
- cancel-in-progress: true
-
-jobs:
- linux-jammy-xpu-py3_8-build:
- name: linux-jammy-xpu-py3.8
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-jammy-xpu-py3.8
- docker-image-name: pytorch-linux-jammy-xpu-2024.0-py3
- runner: linux.2xlarge
- test-matrix: |
- { include: [
- { config: "default", shard: 1, num_shards: 4, runner: "linux.idc.xpu" },
- { config: "default", shard: 2, num_shards: 4, runner: "linux.idc.xpu" },
- { config: "default", shard: 3, num_shards: 4, runner: "linux.idc.xpu" },
- { config: "default", shard: 4, num_shards: 4, runner: "linux.idc.xpu" },
- ]}
-
- linux-jammy-xpu-py3_8-test:
- name: linux-jammy-xpu-py3.8
- uses: ./.github/workflows/_xpu-test.yml
- needs: linux-jammy-xpu-py3_8-build
- permissions:
- id-token: write
- contents: read
- with:
- build-environment: linux-jammy-xpu-py3.8
- docker-image: ${{ needs.linux-jammy-xpu-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-jammy-xpu-py3_8-build.outputs.test-matrix }}
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 763d05793895f..e18be08a5b0ba 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -5,7 +5,7 @@ sphinx==5.0.0
# something related to Docker setup. We can investigate this later
sphinxcontrib.katex==0.8.6
matplotlib==3.6.0
-tensorboard==2.10.0
+tensorboard==2.17.1
# required to build torch.distributed.elastic.rendezvous.etcd* docs
python-etcd==0.4.5
sphinx-copybutton==0.5.0