Skip to content

Commit

Permalink
Update on "stft: Change require_complex warning to an error"
Browse files Browse the repository at this point in the history
[ghstack-poisoned]
  • Loading branch information
peterbell10 committed Dec 15, 2020
2 parents 971e0bc + 8928757 commit a643614
Show file tree
Hide file tree
Showing 485 changed files with 21,070 additions and 7,057 deletions.
3 changes: 3 additions & 0 deletions .circleci/cimodel/data/simple/util/versions.py
Expand Up @@ -29,3 +29,6 @@ def __init__(self, major, minor):
self.minor = minor

super().__init__([self.major, self.minor], "cuda")

def __str__(self):
    """Render this CUDA version as the dotted 'major.minor' string (e.g. '10.1')."""
    return "{}.{}".format(self.major, self.minor)
3 changes: 2 additions & 1 deletion .circleci/cimodel/data/windows_build_definitions.py
Expand Up @@ -86,10 +86,11 @@ def gen_tree(self):
props_dict["executor"] = "windows-with-nvidia-gpu"

props_dict["cuda_version"] = (
miniutils.quote(str(self.cuda_version.major))
miniutils.quote(str(self.cuda_version))
if self.cuda_version
else "cpu"
)

props_dict["name"] = "_".join(name_parts)

return [{key_name: props_dict}]
Expand Down
20 changes: 10 additions & 10 deletions .circleci/config.yml
Expand Up @@ -325,7 +325,7 @@ pytorch_windows_params: &pytorch_windows_params
default: ""
cuda_version:
type: string
default: "10"
default: "10.1"
python_version:
type: string
default: "3.6"
Expand Down Expand Up @@ -675,7 +675,7 @@ jobs:
default: ""
cuda_version:
type: string
default: "10"
default: "10.1"
python_version:
type: string
default: "3.6"
Expand Down Expand Up @@ -737,7 +737,7 @@ jobs:
default: ""
cuda_version:
type: string
default: "10"
default: "10.1"
python_version:
type: string
default: "3.6"
Expand Down Expand Up @@ -8077,7 +8077,7 @@ workflows:
- postnightly
- pytorch_windows_build:
build_environment: pytorch-win-vs2019-cuda10-cudnn7-py3
cuda_version: "10"
cuda_version: "10.1"
name: pytorch_windows_vs2019_py36_cuda10.1_build
python_version: "3.6"
use_cuda: "1"
Expand All @@ -8086,7 +8086,7 @@ workflows:
vc_year: "2019"
- pytorch_windows_test:
build_environment: pytorch-win-vs2019-cuda10-cudnn7-py3
cuda_version: "10"
cuda_version: "10.1"
executor: windows-with-nvidia-gpu
name: pytorch_windows_vs2019_py36_cuda10.1_test1
python_version: "3.6"
Expand All @@ -8099,7 +8099,7 @@ workflows:
vc_year: "2019"
- pytorch_windows_test:
build_environment: pytorch-win-vs2019-cuda10-cudnn7-py3
cuda_version: "10"
cuda_version: "10.1"
executor: windows-with-nvidia-gpu
name: pytorch_windows_vs2019_py36_cuda10.1_test2
python_version: "3.6"
Expand All @@ -8112,7 +8112,7 @@ workflows:
vc_year: "2019"
- pytorch_windows_build:
build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
cuda_version: "11"
cuda_version: "11.1"
name: pytorch_windows_vs2019_py36_cuda11.1_build
python_version: "3.6"
use_cuda: "1"
Expand All @@ -8121,7 +8121,7 @@ workflows:
vc_year: "2019"
- pytorch_windows_test:
build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
cuda_version: "11"
cuda_version: "11.1"
executor: windows-with-nvidia-gpu
filters:
branches:
Expand All @@ -8140,7 +8140,7 @@ workflows:
vc_year: "2019"
- pytorch_windows_test:
build_environment: pytorch-win-vs2019-cuda11-cudnn8-py3
cuda_version: "11"
cuda_version: "11.1"
executor: windows-with-nvidia-gpu
filters:
branches:
Expand Down Expand Up @@ -8204,7 +8204,7 @@ workflows:
vc_year: "2019"
- pytorch_windows_test:
build_environment: pytorch-win-vs2019-cuda10-cudnn7-py3
cuda_version: "10"
cuda_version: "10.1"
filters:
branches:
only:
Expand Down
8 changes: 7 additions & 1 deletion .circleci/scripts/binary_ios_upload.sh
Expand Up @@ -34,7 +34,13 @@ touch version.txt
echo $(date +%s) > version.txt
zip -r ${ZIPFILE} install src version.txt LICENSE
# upload to aws
brew install awscli
# Install conda then 'conda install' awscli
curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
chmod +x ~/conda.sh
/bin/bash ~/conda.sh -b -p ~/anaconda
export PATH="~/anaconda/bin:${PATH}"
source ~/anaconda/bin/activate
conda install -c conda-forge awscli --yes
set +x
export AWS_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_FOR_PYTORCH_BINARY_UPLOAD}
export AWS_SECRET_ACCESS_KEY=${AWS_S3_ACCESS_SECRET_FOR_PYTORCH_BINARY_UPLOAD}
Expand Down
10 changes: 4 additions & 6 deletions .circleci/scripts/windows_cuda_install.sh
@@ -1,13 +1,11 @@
#!/bin/bash
set -eux -o pipefail

if [[ "$CUDA_VERSION" == "10" ]]; then
cuda_complete_version="10.1"
if [[ "$CUDA_VERSION" =~ ^10.* ]]; then
cuda_installer_name="cuda_10.1.243_426.00_win10"
msbuild_project_dir="CUDAVisualStudioIntegration/extras/visual_studio_integration/MSBuildExtensions"
cuda_install_packages="nvcc_10.1 cuobjdump_10.1 nvprune_10.1 cupti_10.1 cublas_10.1 cublas_dev_10.1 cudart_10.1 cufft_10.1 cufft_dev_10.1 curand_10.1 curand_dev_10.1 cusolver_10.1 cusolver_dev_10.1 cusparse_10.1 cusparse_dev_10.1 nvgraph_10.1 nvgraph_dev_10.1 npp_10.1 npp_dev_10.1 nvrtc_10.1 nvrtc_dev_10.1 nvml_dev_10.1"
elif [[ "$CUDA_VERSION" == "11" ]]; then
cuda_complete_version="11.1"
elif [[ "$CUDA_VERSION" =~ ^11.* ]]; then
cuda_installer_name="cuda_11.1.0_456.43_win10"
msbuild_project_dir="visual_studio_integration/CUDAVisualStudioIntegration/extras/visual_studio_integration/MSBuildExtensions"
cuda_install_packages="nvcc_11.1 cuobjdump_11.1 nvprune_11.1 nvprof_11.1 cupti_11.1 cublas_11.1 cublas_dev_11.1 cudart_11.1 cufft_11.1 cufft_dev_11.1 curand_11.1 curand_dev_11.1 cusolver_11.1 cusolver_dev_11.1 cusparse_11.1 cusparse_dev_11.1 npp_11.1 npp_dev_11.1 nvrtc_11.1 nvrtc_dev_11.1 nvml_dev_11.1"
Expand All @@ -16,7 +14,7 @@ else
exit 1
fi

if [[ "${CUDA_VERSION}" != "10" && "${JOB_EXECUTOR}" == "windows-with-nvidia-gpu" ]]; then
if [[ "$CUDA_VERSION" =~ ^11.* && "${JOB_EXECUTOR}" == "windows-with-nvidia-gpu" ]]; then
cuda_install_packages="${cuda_install_packages} Display.Driver"
fi

Expand Down Expand Up @@ -48,7 +46,7 @@ then
export NVTOOLSEXT_PATH="C:\\Program Files\\NVIDIA Corporation\\NvToolsExt\\"
fi

if ! ls "/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${cuda_complete_version}/bin/nvcc.exe"
if ! ls "/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/bin/nvcc.exe"
then
echo "CUDA installation failed"
mkdir -p /c/w/build-results
Expand Down
12 changes: 5 additions & 7 deletions .circleci/scripts/windows_cudnn_install.sh
@@ -1,12 +1,10 @@
#!/bin/bash
set -eux -o pipefail

if [[ "$CUDA_VERSION" == "10" ]]; then
cuda_complete_version="10.1"
cudnn_installer_name="cudnn-10.1-windows10-x64-v7.6.4.38"
elif [[ "$CUDA_VERSION" == "11" ]]; then
cuda_complete_version="11.1"
cudnn_installer_name="cudnn-11.1-windows-x64-v8.0.5.39"
if [[ "$CUDA_VERSION" =~ ^10.* ]]; then
cudnn_installer_name="cudnn-${CUDA_VERSION}-windows10-x64-v7.6.4.38"
elif [[ "$CUDA_VERSION" =~ ^11.* ]]; then
cudnn_installer_name="cudnn-${CUDA_VERSION}-windows-x64-v8.0.5.39"
else
echo "CUDNN for CUDA_VERSION $CUDA_VERSION is not supported yet"
exit 1
Expand All @@ -16,6 +14,6 @@ cudnn_installer_link="https://ossci-windows.s3.amazonaws.com/${cudnn_installer_n

curl --retry 3 -O $cudnn_installer_link
7z x ${cudnn_installer_name}.zip -ocudnn
cp -r cudnn/cuda/* "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${cuda_complete_version}/"
cp -r cudnn/cuda/* "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v${CUDA_VERSION}/"
rm -rf cudnn
rm -f ${cudnn_installer_name}.zip
Expand Up @@ -59,7 +59,7 @@ pytorch_windows_params: &pytorch_windows_params
default: ""
cuda_version:
type: string
default: "10"
default: "10.1"
python_version:
type: string
default: "3.6"
Expand Down
4 changes: 2 additions & 2 deletions .circleci/verbatim-sources/job-specs/pytorch-job-specs.yml
Expand Up @@ -237,7 +237,7 @@ jobs:
default: ""
cuda_version:
type: string
default: "10"
default: "10.1"
python_version:
type: string
default: "3.6"
Expand Down Expand Up @@ -299,7 +299,7 @@ jobs:
default: ""
cuda_version:
type: string
default: "10"
default: "10.1"
python_version:
type: string
default: "3.6"
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
Expand Up @@ -75,7 +75,7 @@ jobs:
- name: Run flake8
run: |
set -eux
pip install flake8==3.8.2 flake8-bugbear==20.1.4 flake8-comprehensions==3.3.0 flake8-executable==2.0.4 flake8-pyi==20.5.0 mccabe pycodestyle==2.6.0 pyflakes==2.2.0
pip install -r requirements-flake8.txt
flake8 --version
flake8 | tee ${GITHUB_WORKSPACE}/flake8-output.txt
- name: Add annotations
Expand Down
43 changes: 0 additions & 43 deletions .jenkins/caffe2/build.sh
Expand Up @@ -18,49 +18,6 @@ build_to_cmake () {


SCCACHE="$(which sccache)"
if [ "$(which gcc)" != "/root/sccache/gcc" ]; then
# Setup SCCACHE
###############################################################################
# Setup sccache if SCCACHE_BUCKET is set
if [ -n "${SCCACHE_BUCKET}" ]; then
mkdir -p ./sccache

SCCACHE="$(which sccache)"
if [ -z "${SCCACHE}" ]; then
echo "Unable to find sccache..."
exit 1
fi

# Setup wrapper scripts
wrapped="cc c++ gcc g++ x86_64-linux-gnu-gcc"
if [[ "${BUILD_ENVIRONMENT}" == *-cuda* ]]; then
wrapped="$wrapped nvcc"
fi
for compiler in $wrapped; do
(
echo "#!/bin/sh"

# TODO: if/when sccache gains native support for an
# SCCACHE_DISABLE flag analogous to ccache's CCACHE_DISABLE,
# this can be removed. Alternatively, this can be removed when
# https://github.com/pytorch/pytorch/issues/13362 is fixed.
#
# NOTE: carefully quoted - we want `which compiler` to be
# resolved as we execute the script, but SCCACHE_DISABLE and
# $@ to be evaluated when we execute the script
echo 'test $SCCACHE_DISABLE && exec '"$(which $compiler)"' "$@"'

echo "exec $SCCACHE $(which $compiler) \"\$@\""
) > "./sccache/$compiler"
chmod +x "./sccache/$compiler"
done

export CACHE_WRAPPER_DIR="$PWD/sccache"

# CMake must find these wrapper scripts
export PATH="$CACHE_WRAPPER_DIR:$PATH"
fi
fi

# Setup ccache if configured to use it (and not sccache)
if [ -z "${SCCACHE}" ] && which ccache > /dev/null; then
Expand Down
35 changes: 2 additions & 33 deletions .jenkins/pytorch/build.sh
@@ -1,19 +1,14 @@
#!/bin/bash

set -ex

# Required environment variable: $BUILD_ENVIRONMENT
# (This is set by default in the Docker images we build, so you don't
# need to set it yourself.

# shellcheck disable=SC2034
COMPACT_JOB_NAME="${BUILD_ENVIRONMENT}"

# Temp: use new sccache
if [[ -n "$IN_CI" && "$BUILD_ENVIRONMENT" == *rocm* ]]; then
# Download customized sccache
sudo curl --retry 3 http://repo.radeon.com/misc/.sccache_amd/sccache -o /opt/cache/bin/sccache
sudo chmod 755 /opt/cache/bin/sccache
fi

source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

if [[ "$BUILD_ENVIRONMENT" == *-linux-xenial-py3-clang5-asan* ]]; then
Expand Down Expand Up @@ -124,32 +119,6 @@ if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
export MAX_JOBS=$(($(nproc) - 1))
fi

# ROCm CI is using Caffe2 docker images, which needs these wrapper
# scripts to correctly use sccache.
if [[ -n "${SCCACHE_BUCKET}" && -z "$IN_CI" ]]; then
mkdir -p ./sccache

SCCACHE="$(which sccache)"
if [ -z "${SCCACHE}" ]; then
echo "Unable to find sccache..."
exit 1
fi

# Setup wrapper scripts
for compiler in cc c++ gcc g++ clang clang++; do
(
echo "#!/bin/sh"
echo "exec $SCCACHE $(which $compiler) \"\$@\""
) > "./sccache/$compiler"
chmod +x "./sccache/$compiler"
done

export CACHE_WRAPPER_DIR="$PWD/sccache"

# CMake must find these wrapper scripts
export PATH="$CACHE_WRAPPER_DIR:$PATH"
fi

if [[ -n "$IN_CI" ]]; then
# Set ROCM_ARCH to gfx900 and gfx906 for CI builds
echo "Limiting PYTORCH_ROCM_ARCH to gfx90[06] for CI builds"
Expand Down
3 changes: 2 additions & 1 deletion .jenkins/pytorch/codegen-test.sh
Expand Up @@ -37,7 +37,8 @@ python -m tools.setup_helpers.generate_code \
mkdir -p "$OUT"/pyi/torch/_C
mkdir -p "$OUT"/pyi/torch/nn
python -m tools.pyi.gen_pyi \
--declarations-path "$OUT"/torch/share/ATen/Declarations.yaml \
--native-functions-path aten/src/ATen/native/native_functions.yaml \
--deprecated-functions-path tools/autograd/deprecated.yaml \
--out "$OUT"/pyi

# autograd codegen (called by torch codegen but can run independently)
Expand Down
1 change: 1 addition & 0 deletions .jenkins/pytorch/multigpu-test.sh
Expand Up @@ -21,4 +21,5 @@ time python test/run_test.py --verbose -i distributed/test_jit_c10d
time python test/run_test.py --verbose -i distributed/test_distributed_fork
time python test/run_test.py --verbose -i distributed/test_c10d
time python test/run_test.py --verbose -i distributed/test_c10d_spawn
time python test/run_test.py --verbose -i distributed/rpc/test_tensorpipe_agent
assert_git_not_dirty
2 changes: 2 additions & 0 deletions .jenkins/pytorch/test.sh
Expand Up @@ -11,6 +11,8 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"

echo "Testing pytorch"

export LANG=C.UTF-8

if [[ "$BUILD_ENVIRONMENT" == *-slow-* ]]; then
export PYTORCH_TEST_WITH_SLOW=1
export PYTORCH_TEST_SKIP_FAST=1
Expand Down
34 changes: 10 additions & 24 deletions .jenkins/pytorch/win-test-helpers/build_pytorch.bat
Expand Up @@ -37,33 +37,19 @@ if "%VC_VERSION%" == "" (
@echo on
popd

if "%CUDA_VERSION%" == "9" goto cuda_build_9
if "%CUDA_VERSION%" == "10" goto cuda_build_10
if "%CUDA_VERSION%" == "11" goto cuda_build_11
goto cuda_build_end
if not "%USE_CUDA%"=="1" goto cuda_build_end

:cuda_build_9
set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION%

set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.2
set CUDA_PATH_V9_2=%CUDA_PATH%
rem version transformer, for example 10.1 to 10_1.
set VERSION_SUFFIX=%CUDA_VERSION:.=_%
set CUDA_PATH_V%VERSION_SUFFIX%=%CUDA_PATH%

goto cuda_build_common

:cuda_build_10

set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1
set CUDA_PATH_V10_1=%CUDA_PATH%

goto cuda_build_common

:cuda_build_11

set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.1
set CUDA_PATH_V11_1=%CUDA_PATH%

goto cuda_build_common

:cuda_build_common
set CUDNN_LIB_DIR=%CUDA_PATH%\lib\x64
set CUDA_TOOLKIT_ROOT_DIR=%CUDA_PATH%
set CUDNN_ROOT_DIR=%CUDA_PATH%
set NVTOOLSEXT_PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt
set PATH=%CUDA_PATH%\bin;%CUDA_PATH%\libnvvp;%PATH%

set CUDNN_LIB_DIR=%CUDA_PATH%\lib\x64
set CUDA_TOOLKIT_ROOT_DIR=%CUDA_PATH%
Expand Down

0 comments on commit a643614

Please sign in to comment.