diff --git a/.ci/docker/manywheel/Dockerfile_ppc64le b/.ci/docker/manywheel/Dockerfile_ppc64le index 936d5037d74c..b34e6124dbae 100755 --- a/.ci/docker/manywheel/Dockerfile_ppc64le +++ b/.ci/docker/manywheel/Dockerfile_ppc64le @@ -75,9 +75,38 @@ ENV LD_LIBRARY_PATH=/opt/rh/gcc-toolset-${DEVTOOLSET_VERSION}/root/usr/lib64:/op # Configure git to avoid safe directory issues RUN git config --global --add safe.directory "*" +# installed python doesn't have development parts. Rebuild it from scratch +RUN /bin/rm -rf /opt/_internal /opt/python /usr/local/*/* + +# EPEL for cmake +FROM base as patchelf +# Install patchelf +ADD ./common/install_patchelf.sh install_patchelf.sh +RUN bash ./install_patchelf.sh && rm install_patchelf.sh +RUN cp $(which patchelf) /patchelf + +FROM patchelf as python +# build python +COPY manywheel/build_scripts /build_scripts +ADD ./common/install_cpython.sh /build_scripts/install_cpython.sh +ENV SSL_CERT_FILE= +RUN bash build_scripts/build.sh && rm -r build_scripts +#RUN bash build_scripts/build.sh || (echo "Checksum verification failed!" 
&& exit 1) + +FROM base as final +COPY --from=python /opt/python /opt/python +COPY --from=python /opt/_internal /opt/_internal +COPY --from=python /opt/python/cp39-cp39/bin/auditwheel /usr/local/bin/auditwheel +COPY --from=patchelf /usr/local/bin/patchelf /usr/local/bin/patchelf + +RUN alternatives --set python /usr/bin/python3.12 +RUN alternatives --set python3 /usr/bin/python3.12 + +RUN pip-3.12 install typing_extensions + # Install required Python packages -RUN pip install --upgrade pip -RUN pip install typing_extensions pyyaml setuptools +#RUN pip install --upgrade pip +#RUN pip install typing_extensions pyyaml setuptools # Install test dependencies RUN dnf install -y \ diff --git a/.ci/docker/manywheel/build.sh b/.ci/docker/manywheel/build.sh index 8b65df9d2b62..0792fb29d54d 100755 --- a/.ci/docker/manywheel/build.sh +++ b/.ci/docker/manywheel/build.sh @@ -66,7 +66,7 @@ case ${GPU_ARCH_TYPE} in MANY_LINUX_VERSION="s390x" ;; cpu-ppc64le) - TARGET=base + TARGET=final DOCKER_TAG=ppc64le GPU_IMAGE=redhat/ubi9 DOCKER_GPU_BUILD_ARG="" diff --git a/.ci/docker/manywheel/build_scripts/build.sh b/.ci/docker/manywheel/build_scripts/build.sh index 34ea62cc2099..d2b95b3035a7 100644 --- a/.ci/docker/manywheel/build_scripts/build.sh +++ b/.ci/docker/manywheel/build_scripts/build.sh @@ -13,6 +13,7 @@ DEVTOOLS_HASH=a8ebeb4bed624700f727179e6ef771dafe47651131a00a78b342251415646acc PATCHELF_HASH=d9afdff4baeacfbc64861454f368b7f2c15c44d245293f7587bbf726bfe722fb CURL_ROOT=curl-7.73.0 CURL_HASH=cf34fe0b07b800f1c01a499a6e8b2af548f6d0e044dca4a29d88a4bee146d131 +#CURL_HASH=ed444155f1fd7d72c44424f9333893e64cbd6c1cbe4489619bce99c4dda58c14 AUTOCONF_ROOT=autoconf-2.69 AUTOCONF_HASH=954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969 @@ -38,8 +39,32 @@ yum -y install bzip2 make git patch unzip bison yasm diffutils \ automake which file \ ${PYTHON_COMPILE_DEPS} -# Install newest autoconf -build_autoconf $AUTOCONF_ROOT $AUTOCONF_HASH +# Download and extract Autoconf +curl -sLO 
http://ftp.gnu.org/gnu/autoconf/$AUTOCONF_ROOT.tar.gz +echo "$AUTOCONF_HASH $AUTOCONF_ROOT.tar.gz" | sha256sum -c - +tar -xzf $AUTOCONF_ROOT.tar.gz +cd $AUTOCONF_ROOT + +# Update config.guess and config.sub +curl -o build-aux/config.guess http://git.savannah.gnu.org/cgit/config.git/plain/config.guess +curl -o build-aux/config.sub http://git.savannah.gnu.org/cgit/config.git/plain/config.sub +chmod +x build-aux/config.guess build-aux/config.sub + +# Configure with the correct host +./configure --host=powerpc64le-pc-linux-gnu + +# Build and install +make -j$(nproc) +make install + +# Clean up +cd .. +rm -rf $AUTOCONF_ROOT $AUTOCONF_ROOT.tar.gz + + +# Install newest autoconf (previous method) +#build_autoconf $AUTOCONF_ROOT $AUTOCONF_HASH + autoconf --version # Compile the latest Python releases. diff --git a/.ci/docker/manywheel/build_scripts/build_utils.sh b/.ci/docker/manywheel/build_scripts/build_utils.sh index cec871cac4f6..e2ab5ddc406f 100755 --- a/.ci/docker/manywheel/build_scripts/build_utils.sh +++ b/.ci/docker/manywheel/build_scripts/build_utils.sh @@ -28,7 +28,12 @@ function check_sha256sum { check_var ${fname} local sha256=$2 check_var ${sha256} - +# Compute and print actual checksum + echo "✅ Expected SHA256: ${sha256}" + echo "🔍 Calculating actual SHA256..." 
+ actual_sha256=$(sha256sum ${fname} | awk '{print $1}') + echo "🔴 Actual SHA256: $actual_sha256" + echo "${sha256} ${fname}" > ${fname}.sha256 sha256sum -c ${fname}.sha256 rm -f ${fname}.sha256 @@ -41,7 +46,8 @@ function build_openssl { local openssl_fname=$1 local openssl_sha256=$2 check_var ${openssl_sha256} check_var ${OPENSSL_DOWNLOAD_URL} - curl -sLO ${OPENSSL_DOWNLOAD_URL}/${openssl_fname}.tar.gz + curl -sLO ${OPENSSL_DOWNLOAD_URL}/${openssl_fname}.tar.gz + check_sha256sum ${openssl_fname}.tar.gz ${openssl_sha256} tar -xzf ${openssl_fname}.tar.gz (cd ${openssl_fname} && do_openssl_build) diff --git a/.ci/docker/manywheel/build_scripts/manylinux1-check.py b/.ci/docker/manywheel/build_scripts/manylinux1-check.py index bdc8f6102981..898998557a9b 100644 --- a/.ci/docker/manywheel/build_scripts/manylinux1-check.py +++ b/.ci/docker/manywheel/build_scripts/manylinux1-check.py @@ -5,7 +5,7 @@ def is_manylinux1_compatible(): # Only Linux, and only x86-64 / i686 from distutils.util import get_platform - if get_platform() not in ["linux-x86_64", "linux-i686", "linux-s390x"]: + if get_platform() not in ["linux-x86_64", "linux-i686", "linux-s390x", "linux-ppc64le"]: return False # Check for presence of _manylinux module diff --git a/.ci/manywheel/build.sh b/.ci/manywheel/build.sh index 4c4d51134715..16f5c1941152 100755 --- a/.ci/manywheel/build.sh +++ b/.ci/manywheel/build.sh @@ -15,7 +15,7 @@ case "${GPU_ARCH_TYPE:-BLANK}" in rocm) bash "${SCRIPTPATH}/build_rocm.sh" ;; - cpu | cpu-cxx11-abi | cpu-s390x) + cpu | cpu-cxx11-abi | cpu-s390x | cpu-ppc64le) bash "${SCRIPTPATH}/build_cpu.sh" ;; xpu) diff --git a/.ci/manywheel/build_common.sh b/.ci/manywheel/build_common.sh index af96000c76eb..7099a4c48d7a 100644 --- a/.ci/manywheel/build_common.sh +++ b/.ci/manywheel/build_common.sh @@ -380,7 +380,7 @@ for pkg in /$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/torch*linux*.w done # create Manylinux 2_28 tag this needs to happen before regenerate the RECORD - if [[ $PLATFORM == 
"manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "xpu" ]]; then + if [[ $PLATFORM == "manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "cpu-ppc64le" && $GPU_ARCH_TYPE != "xpu" ]]; then wheel_file=$(echo $(basename $pkg) | sed -e 's/-cp.*$/.dist-info\/WHEEL/g') sed -i -e s#linux_x86_64#"${PLATFORM}"# $wheel_file; fi @@ -425,7 +425,7 @@ for pkg in /$WHEELHOUSE_DIR/torch_no_python*.whl /$WHEELHOUSE_DIR/torch*linux*.w fi # Rename wheel for Manylinux 2_28 - if [[ $PLATFORM == "manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "xpu" ]]; then + if [[ $PLATFORM == "manylinux_2_28_x86_64" && $GPU_ARCH_TYPE != "cpu-s390x" && $GPU_ARCH_TYPE != "cpu-ppc64le" && $GPU_ARCH_TYPE != "xpu" ]]; then pkg_name=$(echo $(basename $pkg) | sed -e s#linux_x86_64#"${PLATFORM}"#) zip -rq $pkg_name $PREIX* rm -f $pkg diff --git a/.ci/manywheel/build_cpu.sh b/.ci/manywheel/build_cpu.sh index 9d982bd30e25..7e2b05c698c2 100755 --- a/.ci/manywheel/build_cpu.sh +++ b/.ci/manywheel/build_cpu.sh @@ -36,6 +36,8 @@ elif [[ "$OS_NAME" == *"AlmaLinux"* ]]; then elif [[ "$OS_NAME" == *"Ubuntu"* ]]; then if [[ "$(uname -m)" == "s390x" ]]; then LIBGOMP_PATH="/usr/lib/s390x-linux-gnu/libgomp.so.1" + elif [[ "$(uname -m)" == "ppc64le" ]]; then + LIBGOMP_PATH="/usr/lib64/libgomp.so.1" else LIBGOMP_PATH="/usr/lib/x86_64-linux-gnu/libgomp.so.1" fi diff --git a/.ci/pytorch/check_binary.sh b/.ci/pytorch/check_binary.sh index 0ce770a51b6c..a521278357f1 100755 --- a/.ci/pytorch/check_binary.sh +++ b/.ci/pytorch/check_binary.sh @@ -263,7 +263,7 @@ fi if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then echo "Checking that MKL is available" build_and_run_example_cpp check-torch-mkl -elif [[ "$(uname -m)" != "arm64" && "$(uname -m)" != "s390x" ]]; then +elif [[ "$(uname -m)" != "arm64" && "$(uname -m)" != "s390x" && "$(uname -m)" != "ppc64le" ]]; then if [[ "$(uname)" != 'Darwin' || "$PACKAGE_TYPE" != *wheel ]]; then if [[ "$(uname -m)" == 
"aarch64" ]]; then echo "Checking that MKLDNN is available on aarch64" @@ -287,7 +287,7 @@ if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then echo "Checking that XNNPACK is available" build_and_run_example_cpp check-torch-xnnpack else - if [[ "$(uname)" != 'Darwin' || "$PACKAGE_TYPE" != *wheel ]] && [[ "$(uname -m)" != "s390x" ]]; then + if [[ "$(uname)" != 'Darwin' || "$PACKAGE_TYPE" != *wheel ]] && [[ "$(uname -m)" != "s390x" ]] && [[ "$(uname -m)" != "ppc64le" ]]; then echo "Checking that XNNPACK is available" pushd /tmp python -c 'import torch.backends.xnnpack; exit(0 if torch.backends.xnnpack.enabled else 1)' @@ -308,7 +308,7 @@ if [[ "$OSTYPE" == "msys" ]]; then fi # Test that CUDA builds are setup correctly -if [[ "$DESIRED_CUDA" != 'cpu' && "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'cpu-cxx11-abi' && "$DESIRED_CUDA" != *"rocm"* && "$(uname -m)" != "s390x" ]]; then +if [[ "$DESIRED_CUDA" != 'cpu' && "$DESIRED_CUDA" != 'xpu' && "$DESIRED_CUDA" != 'cpu-cxx11-abi' && "$DESIRED_CUDA" != *"rocm"* && "$(uname -m)" != "s390x" && "$(uname -m)" != "ppc64le" ]]; then if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then build_and_run_example_cpp check-torch-cuda else diff --git a/.github/actions/test-pytorch-binary/action.yml b/.github/actions/test-pytorch-binary/action.yml index 51fc8d14f474..a759d155f65d 100644 --- a/.github/actions/test-pytorch-binary/action.yml +++ b/.github/actions/test-pytorch-binary/action.yml @@ -35,7 +35,7 @@ runs: ) echo "CONTAINER_NAME=${container_name}" >> "$GITHUB_ENV" - if [[ "${GPU_ARCH_TYPE}" != "rocm" && "${BUILD_ENVIRONMENT}" != "linux-aarch64-binary-manywheel" && "${BUILD_ENVIRONMENT}" != "linux-s390x-binary-manywheel" && "${GPU_ARCH_TYPE}" != "xpu" ]]; then + if [[ "${GPU_ARCH_TYPE}" != "rocm" && "${BUILD_ENVIRONMENT}" != "linux-aarch64-binary-manywheel" && "${BUILD_ENVIRONMENT}" != "linux-s390x-binary-manywheel" && "${BUILD_ENVIRONMENT}" != "linux-ppc64le-binary-manywheel" && "${GPU_ARCH_TYPE}" != "xpu" ]]; then # Propagate 
download.pytorch.org IP to container. This is only needed on Linux non aarch64 runner grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" bash -c "/bin/cat >> /etc/hosts" fi @@ -46,7 +46,7 @@ runs: docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash -x /run.sh" - name: Cleanup docker - if: always() && (env.BUILD_ENVIRONMENT == 'linux-s390x-binary-manywheel' || env.GPU_ARCH_TYPE == 'xpu') + if: always() && (env.BUILD_ENVIRONMENT == 'linux-s390x-binary-manywheel' || env.BUILD_ENVIRONMENT == 'linux-ppc64le-binary-manywheel' || env.GPU_ARCH_TYPE == 'xpu') shell: bash run: | # on s390x or xpu stop the container for clean worker stop diff --git a/.github/scripts/generate_binary_build_matrix.py b/.github/scripts/generate_binary_build_matrix.py index 78778b848a09..42e727f31c00 100644 --- a/.github/scripts/generate_binary_build_matrix.py +++ b/.github/scripts/generate_binary_build_matrix.py @@ -40,6 +40,9 @@ CPU_S390X_ARCH = ["cpu-s390x"] + +CPU_PPC64LE_ARCH = ["cpu-ppc64le"] + CUDA_AARCH64_ARCHES = ["12.8-aarch64"] @@ -152,6 +155,8 @@ def arch_type(arch_version: str) -> str: return "cpu-aarch64" elif arch_version in CPU_S390X_ARCH: return "cpu-s390x" + elif arch_version in CPU_PPC64LE_ARCH: + return "cpu-ppc64le" elif arch_version in CUDA_AARCH64_ARCHES: return "cuda-aarch64" else: # arch_version should always be "cpu" in this case @@ -179,6 +184,7 @@ def arch_type(arch_version: str) -> str: "cpu-cxx11-abi": f"pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-{DEFAULT_TAG}", "cpu-aarch64": f"pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-{DEFAULT_TAG}", "cpu-s390x": f"pytorch/manylinuxs390x-builder:cpu-s390x-{DEFAULT_TAG}", + "cpu-ppc64le": f"pytorch/manylinuxppc64le-builder:cpu-ppc64le-{DEFAULT_TAG}", } CXX11_ABI = "cxx11-abi" @@ -212,6 +218,7 @@ def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str: "cpu-aarch64": "cpu", "cpu-cxx11-abi": "cpu-cxx11-abi", "cpu-s390x": "cpu", + "cpu-ppc64le": "cpu", 
"cuda": f"cu{gpu_arch_version.replace('.', '')}", "cuda-aarch64": f"cu{gpu_arch_version.replace('-aarch64', '').replace('.', '')}", "rocm": f"rocm{gpu_arch_version}", @@ -290,8 +297,8 @@ def generate_wheels_matrix( use_split_build: bool = False, ) -> list[dict[str, str]]: package_type = "wheel" - if os == "linux" or os == "linux-aarch64" or os == "linux-s390x": - # NOTE: We only build manywheel packages for x86_64 and aarch64 and s390x linux + if os == "linux" or os == "linux-aarch64" or os == "linux-s390x" or os == "linux-ppc64le": + # NOTE: We only build manywheel packages for x86_64 and aarch64 and s390x and ppc64le linux package_type = "manywheel" if python_versions is None: @@ -312,6 +319,11 @@ def generate_wheels_matrix( # Only want the one arch as the CPU type is different and # uses different build/test scripts arches = ["cpu-s390x"] + elif os == "linux-ppc64le": + # Only want the one arch as the CPU type is different and + # uses different build/test scripts + arches = ["cpu-ppc64le"] + ret: list[dict[str, str]] = [] for python_version in python_versions: @@ -323,13 +335,15 @@ def generate_wheels_matrix( or arch_version == "cpu-cxx11-abi" or arch_version == "cpu-aarch64" or arch_version == "cpu-s390x" + or arch_version == "cpu-ppc64le" or arch_version == "xpu" else arch_version ) - # TODO: Enable python 3.13t on cpu-s390x - if gpu_arch_type == "cpu-s390x" and python_version == "3.13t": - continue + + # TODO: Enable python 3.13t cpu-s390x or MacOS or Windows + if (gpu_arch_type == "cpu-s390x" or gpu_arch_type == "cpu-ppc64le") and python_version == "3.13t": + continue if use_split_build and ( arch_version not in ["12.6", "12.8", "11.8", "cpu"] or os != "linux" diff --git a/.github/scripts/generate_ci_workflows.py b/.github/scripts/generate_ci_workflows.py index 520845413e20..865711ac5677 100755 --- a/.github/scripts/generate_ci_workflows.py +++ b/.github/scripts/generate_ci_workflows.py @@ -101,6 +101,7 @@ class OperatingSystem: MACOS_ARM64 = "macos-arm64" 
LINUX_AARCH64 = "linux-aarch64" LINUX_S390X = "linux-s390x" + LINUX_PPC64LE = "linux-ppc64le" LINUX_BINARY_BUILD_WORFKLOWS = [ @@ -368,6 +369,20 @@ class OperatingSystem: ), ] +PPC64LE_BINARY_BUILD_WORKFLOWS = [ + BinaryBuildWorkflow( + os=OperatingSystem.LINUX_PPC64LE, + package_type="manywheel", + build_configs=generate_binary_build_matrix.generate_wheels_matrix( + OperatingSystem.LINUX_PPC64LE + ), + ciflow_config=CIFlowConfig( + labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL}, + isolated_workflow=True, + ), + ), +] + def main() -> None: jinja_env = jinja2.Environment( @@ -390,6 +405,10 @@ def main() -> None: jinja_env.get_template("linux_binary_build_workflow.yml.j2"), S390X_BINARY_BUILD_WORKFLOWS, ), + ( + jinja_env.get_template("linux_binary_build_workflow.yml.j2"), + PPC64LE_BINARY_BUILD_WORKFLOWS, + ), ( jinja_env.get_template("linux_binary_build_workflow.yml.j2"), LINUX_BINARY_SMOKE_WORKFLOWS, diff --git a/.github/templates/linux_binary_build_workflow.yml.j2 b/.github/templates/linux_binary_build_workflow.yml.j2 index e0fda97e324e..dbd938a04cb9 100644 --- a/.github/templates/linux_binary_build_workflow.yml.j2 +++ b/.github/templates/linux_binary_build_workflow.yml.j2 @@ -35,6 +35,8 @@ env: ALPINE_IMAGE: "arm64v8/alpine" {%- elif "s390x" in build_environment %} ALPINE_IMAGE: "docker.io/s390x/alpine" + {%- elif "ppc64le" in build_environment %} + ALPINE_IMAGE: "docker.io/ppc64le/alpine" {%- else %} ALPINE_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/tool/alpine" {%- endif %} @@ -74,6 +76,10 @@ jobs: runs_on: linux.s390x ALPINE_IMAGE: "docker.io/s390x/alpine" timeout-minutes: 420 + {%- elif "ppc64le" in build_environment %} + runs_on: linux.ppc64le + ALPINE_IMAGE: "docker.io/ppc64le/alpine" + timeout-minutes: 420 {%- elif "conda" in build_environment and config["gpu_arch_type"] == "cuda" %} runner_prefix: "${{ needs.get-label-type.outputs.label-type }}" runs_on: linux.24xlarge.ephemeral @@ -109,6 +115,9 @@ jobs: {%- elif "s390x" in 
build_environment %} runs_on: linux.s390x ALPINE_IMAGE: "docker.io/s390x/alpine" + {%- elif "ppc64le" in build_environment %} + runs_on: linux.ppc64le + ALPINE_IMAGE: "docker.io/ppc64le/alpine" {%- elif config["gpu_arch_type"] == "rocm" %} runs_on: linux.rocm.gpu {%- elif config["gpu_arch_type"] == "cuda" and config["gpu_arch_version"] == "12.8" %} diff --git a/.github/workflows/_binary-build-linux.yml b/.github/workflows/_binary-build-linux.yml index 05a91f2e746e..0366c6cccc75 100644 --- a/.github/workflows/_binary-build-linux.yml +++ b/.github/workflows/_binary-build-linux.yml @@ -149,7 +149,7 @@ jobs: run: env - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" - if: inputs.build_environment != 'linux-s390x-binary-manywheel' + if: inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: pytorch/test-infra/.github/actions/setup-ssh@main continue-on-error: true with: @@ -158,14 +158,14 @@ jobs: - name: Checkout PyTorch uses: pytorch/pytorch/.github/actions/checkout-pytorch@main with: - no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }} + no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' || inputs.build_environment == 'linux-ppc64le-binary-manywheel' }} - name: Setup Linux - if: inputs.build_environment != 'linux-s390x-binary-manywheel' + if: inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: ./.github/actions/setup-linux - name: Chown workspace - if: inputs.build_environment != 'linux-s390x-binary-manywheel' + if: inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: ./.github/actions/chown-workspace with: ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }} @@ -178,7 
+178,7 @@ jobs: rm -rf "${GITHUB_WORKSPACE}" mkdir "${GITHUB_WORKSPACE}" - if [[ ${{ inputs.build_environment }} == 'linux-aarch64-binary-manywheel' ]] || [[ ${{ inputs.build_environment }} == 'linux-s390x-binary-manywheel' ]] ; then + if [[ ${{ inputs.build_environment }} == 'linux-aarch64-binary-manywheel' ]] || [[ ${{ inputs.build_environment }} == 'linux-s390x-binary-manywheel' ]] || [[ ${{ inputs.build_environment }} == 'linux-ppc64le-binary-manywheel' ]] ; then rm -rf "${RUNNER_TEMP}/artifacts" mkdir "${RUNNER_TEMP}/artifacts" fi @@ -210,7 +210,7 @@ jobs: ]} - name: Pull Docker image - if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }} + if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' }} uses: pytorch/test-infra/.github/actions/pull-docker-image@main with: docker-image: ${{ inputs.DOCKER_IMAGE }} @@ -251,7 +251,7 @@ jobs: fi - name: Chown artifacts - if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }} + if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' }} shell: bash run: | # Ensure the working directory gets chowned back to the current user @@ -266,17 +266,17 @@ jobs: ${{ runner.temp }}/artifacts/* - name: Teardown Linux - if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' + if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: pytorch/test-infra/.github/actions/teardown-linux@main - name: Chown workspace - if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' + if: always() && inputs.build_environment != 
'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: ./pytorch/.github/actions/chown-workspace with: ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }} - name: Cleanup docker - if: always() && inputs.build_environment == 'linux-s390x-binary-manywheel' + if: always() && ( inputs.build_environment == 'linux-s390x-binary-manywheel' || inputs.build_environment == 'linux-ppc64le-binary-manywheel' ) shell: bash run: | # on s390x stop the container for clean worker stop diff --git a/.github/workflows/_binary-test-linux.yml b/.github/workflows/_binary-test-linux.yml index 09490cc1b9f4..801eca5de7a2 100644 --- a/.github/workflows/_binary-test-linux.yml +++ b/.github/workflows/_binary-test-linux.yml @@ -132,7 +132,7 @@ jobs: } >> "${GITHUB_ENV} }}" - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)" - if: inputs.build_environment != 'linux-s390x-binary-manywheel' + if: inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: pytorch/test-infra/.github/actions/setup-ssh@main continue-on-error: true with: @@ -142,14 +142,14 @@ jobs: - name: Checkout PyTorch uses: pytorch/pytorch/.github/actions/checkout-pytorch@main with: - no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' }} + no-sudo: ${{ inputs.build_environment == 'linux-aarch64-binary-manywheel' || inputs.build_environment == 'linux-s390x-binary-manywheel' || inputs.build_environment == 'linux-ppc64le-binary-manywheel' }} - name: Setup Linux - if: inputs.build_environment != 'linux-s390x-binary-manywheel' + if: inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: ./.github/actions/setup-linux - name: Chown workspace - if: inputs.build_environment != 
'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: ./.github/actions/chown-workspace with: ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }} @@ -198,7 +198,7 @@ jobs: if: ${{ inputs.GPU_ARCH_TYPE == 'cuda' && steps.filter.outputs.is-test-matrix-empty == 'False' }} - name: Pull Docker image - if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' }} + if: ${{ steps.filter.outputs.is-test-matrix-empty == 'False' && inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' }} uses: pytorch/test-infra/.github/actions/pull-docker-image@main with: docker-image: ${{ inputs.DOCKER_IMAGE }} @@ -208,11 +208,11 @@ jobs: uses: ./pytorch/.github/actions/test-pytorch-binary - name: Teardown Linux - if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' + if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: pytorch/test-infra/.github/actions/teardown-linux@main - name: Chown workspace - if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' + if: always() && inputs.build_environment != 'linux-s390x-binary-manywheel' && inputs.build_environment != 'linux-ppc64le-binary-manywheel' uses: ./pytorch/.github/actions/chown-workspace with: ALPINE_IMAGE: ${{ inputs.ALPINE_IMAGE }} diff --git a/.github/workflows/_linux-build.yml b/.github/workflows/_linux-build.yml index 7f6132fd4d7a..385cf3c47e6d 100644 --- a/.github/workflows/_linux-build.yml +++ b/.github/workflows/_linux-build.yml @@ -97,8 +97,7 @@ on: jobs: build: environment: ${{ github.ref == 'refs/heads/main' && 'scribe-protected' || startsWith(github.ref, 'refs/heads/release/') && 'scribe-protected' || contains(github.event.pull_request.labels.*.name, 'ci-scribe') && 'scribe-pr' || '' }} - # Don't run on forked repos - if: 
github.repository_owner == 'pytorch' + runs-on: ${{ inputs.runner_prefix}}${{ inputs.runner }} timeout-minutes: 240 outputs: diff --git a/.github/workflows/_runner-determinator.yml b/.github/workflows/_runner-determinator.yml index 47cd278bb8a7..dc6caf917de5 100644 --- a/.github/workflows/_runner-determinator.yml +++ b/.github/workflows/_runner-determinator.yml @@ -41,7 +41,7 @@ on: jobs: runner-determinator: # Don't run on forked repos - if: github.repository_owner == 'pytorch' + runs-on: ubuntu-latest outputs: label-type: ${{ steps.set-condition.outputs.label-type }} diff --git a/.github/workflows/generated-linux-ppc64le-binary-manywheel-nightly.yml b/.github/workflows/generated-linux-ppc64le-binary-manywheel-nightly.yml new file mode 100755 index 000000000000..bb9a7fed84f1 --- /dev/null +++ b/.github/workflows/generated-linux-ppc64le-binary-manywheel-nightly.yml @@ -0,0 +1,111 @@ +# @generated DO NOT EDIT MANUALLY + +# Template is at: .github/templates/linux_binary_build_workflow.yml.j2 +# Generation script: .github/scripts/generate_ci_workflows.py +name: linux-ppc64le-binary-manywheel + + +on: + push: + # NOTE: Meta Employees can trigger new nightlies using: https://fburl.com/trigger_pytorch_nightly_build + branches: + - temp-ppc64le-wheel-branch-v6 + tags: + # NOTE: Binary build pipelines should only get triggered on release candidate builds + # Release candidate tags look like: v1.11.0-rc1 + - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+ + - 'ciflow/binaries/*' + - 'ciflow/binaries_wheel/*' + workflow_dispatch: + +env: + # Needed for conda builds + ALPINE_IMAGE: "docker.io/ppc64le/alpine" + AWS_DEFAULT_REGION: us-east-1 + BINARY_ENV_FILE: /tmp/env + BUILD_ENVIRONMENT: linux-ppc64le-binary-manywheel + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PYTORCH_FINAL_PACKAGE_DIR: /artifacts + PYTORCH_ROOT: /pytorch + SHA1: ${{ github.event.pull_request.head.sha || github.sha }} + SKIP_ALL_TESTS: 0 +concurrency: + group: 
linux-ppc64le-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }} + cancel-in-progress: true + +jobs: + get-label-type: + + name: get-label-type + uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main + with: + triggering_actor: ${{ github.triggering_actor }} + issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }} + curr_branch: ${{ github.head_ref || github.ref_name }} + curr_ref_type: ${{ github.ref_type }} + manywheel-py3_13-cpu-ppc64le-build: + + uses: ./.github/workflows/_binary-build-linux.yml + + with: + PYTORCH_ROOT: /pytorch + PACKAGE_TYPE: manywheel + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu-ppc64le + DOCKER_IMAGE: pytorch/manylinuxppc64le-builder:cpu-ppc64le-main + use_split_build: False + DESIRED_PYTHON: "3.13" + runs_on: linux.ppc64le + ALPINE_IMAGE: "docker.io/ppc64le/alpine" + timeout-minutes: 420 + build_name: manywheel-py3_13-cpu-ppc64le + build_environment: linux-ppc64le-binary-manywheel + PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.5.8; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.1.3; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.147; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.1.9; platform_system == 'Linux' and platform_machine == 'x86_64' | 
nvidia-cusparse-cu12==12.3.1.170; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparselt-cu12==0.6.2; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.127; platform_system == 'Linux' and platform_machine == 'x86_64' + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + manywheel-py3_13-cpu-ppc64le-test: # Testing + + needs: + - manywheel-py3_13-cpu-ppc64le-build + + uses: ./.github/workflows/_binary-test-linux.yml + with: + PYTORCH_ROOT: /pytorch + PACKAGE_TYPE: manywheel + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu-ppc64le + DOCKER_IMAGE: pytorch/manylinuxppc64le-builder:cpu-ppc64le-main + use_split_build: False + DESIRED_PYTHON: "3.13" + build_name: manywheel-py3_13-cpu-ppc64le + build_environment: linux-ppc64le-binary-manywheel + runs_on: linux.ppc64le + ALPINE_IMAGE: "docker.io/ppc64le/alpine" + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + manywheel-py3_13-cpu-ppc64le-upload: # Uploading + + permissions: + id-token: write + contents: read + needs: manywheel-py3_13-cpu-ppc64le-test + with: + PYTORCH_ROOT: /pytorch + PACKAGE_TYPE: manywheel + # TODO: This is a legacy variable that we eventually want to get rid of in + # favor of GPU_ARCH_VERSION + DESIRED_CUDA: cpu + GPU_ARCH_TYPE: cpu-ppc64le + DOCKER_IMAGE: pytorch/manylinuxppc64le-builder:cpu-ppc64le-main + use_split_build: False + DESIRED_PYTHON: "3.13" + build_name: manywheel-py3_13-cpu-ppc64le + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + uses: ./.github/workflows/_binary-upload.yml + \ No newline at end of file diff --git a/.github/workflows/ppc64le.yml b/.github/workflows/ppc64le.yml index e5875982c187..3e261d00d675 100755 --- 
a/.github/workflows/ppc64le.yml +++ b/.github/workflows/ppc64le.yml @@ -12,7 +12,6 @@ concurrency: jobs: linux-manylinux-2_28-py3-cpu-ppc64le-build: - if: github.repository_owner == 'pytorch' name: linux-manylinux-2_28-py3-cpu-ppc64le-build uses: ./.github/workflows/_linux-build.yml with: