Merge remote-tracking branch 'upstream/master' into ci-all/cudagraphs_generator_diffs_v2
mcarilli committed Dec 2, 2020
2 parents 37baeba + b2ec21a commit 28d5a21
Showing 163 changed files with 2,351 additions and 1,542 deletions.
2 changes: 1 addition & 1 deletion .circleci/cimodel/data/pytorch_build_data.py
@@ -93,7 +93,7 @@
]),
]),
("rocm", [
("3.9", [
("3.7", [
("3.6", [
('build_only', [XImportant(True)]),
]),
1 change: 1 addition & 0 deletions .circleci/cimodel/data/simple/docker_definitions.py
@@ -29,6 +29,7 @@
"pytorch-linux-xenial-py3.6-gcc5.4", # this one is used in doc builds
"pytorch-linux-xenial-py3.6-gcc7.2",
"pytorch-linux-xenial-py3.6-gcc7",
"pytorch-linux-bionic-rocm3.7-py3.6",
"pytorch-linux-bionic-rocm3.8-py3.6",
"pytorch-linux-bionic-rocm3.9-py3.6",
]
27 changes: 19 additions & 8 deletions .circleci/config.yml
@@ -453,8 +453,12 @@ jobs:
no_output_timeout: "1h"
command: |
set -e
if [[ "${DOCKER_IMAGE}" == *rocm3.9* ]]; then
export DOCKER_TAG="f3d89a32912f62815e4feaeed47e564e887dffd6"
# TODO: Remove this after we figure out why rocm tests are failing
if [[ "${DOCKER_IMAGE}" == *rocm3.5* ]]; then
export DOCKER_TAG="ab1632df-fa59-40e6-8c23-98e004f61148"
fi
if [[ "${DOCKER_IMAGE}" == *rocm3.7* ]]; then
export DOCKER_TAG="1045c7b891104cb4fd23399eab413b6213e48aeb"
fi
if [[ ${BUILD_ENVIRONMENT} == *"pure_torch"* ]]; then
echo 'BUILD_CAFFE2=OFF' >> "${BASH_ENV}"
@@ -534,8 +538,12 @@ jobs:
command: |
set -e
export PYTHONUNBUFFERED=1
if [[ "${DOCKER_IMAGE}" == *rocm3.9* ]]; then
export DOCKER_TAG="f3d89a32912f62815e4feaeed47e564e887dffd6"
# TODO: Remove this after we figure out why rocm tests are failing
if [[ "${DOCKER_IMAGE}" == *rocm3.5* ]]; then
export DOCKER_TAG="ab1632df-fa59-40e6-8c23-98e004f61148"
fi
if [[ "${DOCKER_IMAGE}" == *rocm3.7* ]]; then
export DOCKER_TAG="1045c7b891104cb4fd23399eab413b6213e48aeb"
fi
# See Note [Special build images]
output_image=${DOCKER_IMAGE}:${DOCKER_TAG}-${CIRCLE_SHA1}
@@ -7272,6 +7280,9 @@ workflows:
- docker_build_job:
name: "docker-pytorch-linux-xenial-py3.6-gcc7"
image_name: "pytorch-linux-xenial-py3.6-gcc7"
- docker_build_job:
name: "docker-pytorch-linux-bionic-rocm3.7-py3.6"
image_name: "pytorch-linux-bionic-rocm3.7-py3.6"
- docker_build_job:
name: "docker-pytorch-linux-bionic-rocm3.8-py3.6"
image_name: "pytorch-linux-bionic-rocm3.8-py3.6"
@@ -7702,11 +7713,11 @@ workflows:
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-py3.8-gcc9"
resource_class: large
- pytorch_linux_build:
name: pytorch_linux_bionic_rocm3_9_py3_6_build
name: pytorch_linux_bionic_rocm3_7_py3_6_build
requires:
- "docker-pytorch-linux-bionic-rocm3.9-py3.6"
build_environment: "pytorch-linux-bionic-rocm3.9-py3.6-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-rocm3.9-py3.6"
- "docker-pytorch-linux-bionic-rocm3.7-py3.6"
build_environment: "pytorch-linux-bionic-rocm3.7-py3.6-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-bionic-rocm3.7-py3.6"
resource_class: xlarge
- pytorch_macos_10_13_py3_build:
name: pytorch_macos_10_13_py3_build
7 changes: 7 additions & 0 deletions .circleci/docker/build.sh
@@ -274,6 +274,13 @@ case "$image" in
VISION=yes
KATEX=yes
;;
pytorch-linux-bionic-rocm3.7-py3.6)
ANACONDA_PYTHON_VERSION=3.6
PROTOBUF=yes
DB=yes
VISION=yes
ROCM_VERSION=3.7
;;
pytorch-linux-bionic-rocm3.8-py3.6)
ANACONDA_PYTHON_VERSION=3.6
PROTOBUF=yes
16 changes: 12 additions & 4 deletions .circleci/verbatim-sources/job-specs/pytorch-job-specs.yml
@@ -15,8 +15,12 @@ jobs:
no_output_timeout: "1h"
command: |
set -e
if [[ "${DOCKER_IMAGE}" == *rocm3.9* ]]; then
export DOCKER_TAG="f3d89a32912f62815e4feaeed47e564e887dffd6"
# TODO: Remove this after we figure out why rocm tests are failing
if [[ "${DOCKER_IMAGE}" == *rocm3.5* ]]; then
export DOCKER_TAG="ab1632df-fa59-40e6-8c23-98e004f61148"
fi
if [[ "${DOCKER_IMAGE}" == *rocm3.7* ]]; then
export DOCKER_TAG="1045c7b891104cb4fd23399eab413b6213e48aeb"
fi
if [[ ${BUILD_ENVIRONMENT} == *"pure_torch"* ]]; then
echo 'BUILD_CAFFE2=OFF' >> "${BASH_ENV}"
@@ -96,8 +100,12 @@ jobs:
command: |
set -e
export PYTHONUNBUFFERED=1
if [[ "${DOCKER_IMAGE}" == *rocm3.9* ]]; then
export DOCKER_TAG="f3d89a32912f62815e4feaeed47e564e887dffd6"
# TODO: Remove this after we figure out why rocm tests are failing
if [[ "${DOCKER_IMAGE}" == *rocm3.5* ]]; then
export DOCKER_TAG="ab1632df-fa59-40e6-8c23-98e004f61148"
fi
if [[ "${DOCKER_IMAGE}" == *rocm3.7* ]]; then
export DOCKER_TAG="1045c7b891104cb4fd23399eab413b6213e48aeb"
fi
# See Note [Special build images]
output_image=${DOCKER_IMAGE}:${DOCKER_TAG}-${CIRCLE_SHA1}
2 changes: 2 additions & 0 deletions .gitignore
@@ -93,6 +93,8 @@ torch/lib64
torch/include/
torch/share/
torch/test/
torch/utils/benchmark/utils/valgrind_wrapper/callgrind.h
torch/utils/benchmark/utils/valgrind_wrapper/valgrind.h
torch/version.py
# Root level file used in CI to specify certain env configs.
# E.g., see .circleci/config.yaml
2 changes: 1 addition & 1 deletion .jenkins/caffe2/build.sh
@@ -265,7 +265,7 @@ fi
###############################################################################

# Install ONNX into a local directory
pip install --user -b /tmp/pip_install_onnx "file://${ROOT_DIR}/third_party/onnx#egg=onnx"
pip install --user "file://${ROOT_DIR}/third_party/onnx#egg=onnx"

report_compile_cache_stats

@@ -9,10 +9,10 @@ if "%CUDA_SUFFIX%" == "" (

if "%REBUILD%"=="" (
if "%BUILD_ENVIRONMENT%"=="" (
curl --retry 3 -k https://s3.amazonaws.com/ossci-windows/magma_2.5.3_%CUDA_SUFFIX%_%BUILD_TYPE%.7z --output %TMP_DIR_WIN%\magma_2.5.3_%CUDA_SUFFIX%_%BUILD_TYPE%.7z
curl --retry 3 -k https://s3.amazonaws.com/ossci-windows/magma_2.5.4_%CUDA_SUFFIX%_%BUILD_TYPE%.7z --output %TMP_DIR_WIN%\magma_2.5.4_%CUDA_SUFFIX%_%BUILD_TYPE%.7z
) else (
aws s3 cp s3://ossci-windows/magma_2.5.3_%CUDA_SUFFIX%_%BUILD_TYPE%.7z %TMP_DIR_WIN%\magma_2.5.3_%CUDA_SUFFIX%_%BUILD_TYPE%.7z --quiet
aws s3 cp s3://ossci-windows/magma_2.5.4_%CUDA_SUFFIX%_%BUILD_TYPE%.7z %TMP_DIR_WIN%\magma_2.5.4_%CUDA_SUFFIX%_%BUILD_TYPE%.7z --quiet
)
7z x -aoa %TMP_DIR_WIN%\magma_2.5.3_%CUDA_SUFFIX%_%BUILD_TYPE%.7z -o%TMP_DIR_WIN%\magma
7z x -aoa %TMP_DIR_WIN%\magma_2.5.4_%CUDA_SUFFIX%_%BUILD_TYPE%.7z -o%TMP_DIR_WIN%\magma
)
set MAGMA_HOME=%TMP_DIR_WIN%\magma
1 change: 1 addition & 0 deletions BUILD.bazel
@@ -131,6 +131,7 @@ genrule(
"aten/src/ATen/RegisterQuantizedCPU.cpp",
"aten/src/ATen/RegisterSparseCPU.cpp",
"aten/src/ATen/RegisterMath.cpp",
"aten/src/ATen/RegisterMeta.cpp",
"aten/src/ATen/RegisterDefaultBackend.cpp",
"aten/src/ATen/RegisterSchema.cpp",
"aten/src/ATen/Functions.h",
6 changes: 0 additions & 6 deletions aten/src/ATen/LegacyTHFunctionsCUDA.h
@@ -36,12 +36,6 @@ std::tuple<Tensor,Tensor> _th_topk(const Tensor & self, int64_t k, int64_t dim,
Tensor & _th_renorm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
Tensor _th_renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
Tensor & _th_renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
Tensor & _th_fmod_out(Tensor & result, const Tensor & self, Scalar other);
Tensor _th_fmod(const Tensor & self, Scalar other);
Tensor & _th_fmod_out(Tensor & result, const Tensor & self, const Tensor & other);
Tensor _th_fmod(const Tensor & self, const Tensor & other);
Tensor & _th_fmod_(Tensor & self, Scalar other);
Tensor & _th_fmod_(Tensor & self, const Tensor & other);
Tensor & _th_cross_kernel_out(Tensor & result, const Tensor & self, const Tensor & other, int64_t dim);
Tensor _th_cross_kernel(const Tensor & self, const Tensor & other, int64_t dim);
std::tuple<Tensor &,Tensor &> _th_gels_out(Tensor & res1, Tensor & res2, const Tensor & self, const Tensor & A);
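Note: the removed _th_fmod declarations indicate that fmod on CUDA no longer routes through these legacy TH bindings. A minimal sketch of the regular ATen calls that cover the same surface (an illustration, not part of this commit; shown on CPU for brevity, the same calls dispatch to CUDA when the tensors live on a GPU):

#include <ATen/ATen.h>
#include <iostream>

int main() {
  auto a = at::arange(1, 7, at::kFloat).view({2, 3});
  auto b = at::full({2, 3}, 2.5, at::kFloat);
  auto c = at::fmod(a, b);   // tensor-tensor fmod via the standard ATen op
  auto d = a.fmod(2.0);      // tensor-scalar fmod
  std::cout << c << "\n" << d << std::endl;
  return 0;
}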
4 changes: 4 additions & 0 deletions aten/src/ATen/TensorIndexing.h
@@ -5,6 +5,10 @@
#include <ATen/ExpandUtils.h>
#include <ATen/Functions.h>

// TODO: try to remove this
// There is some back story, see https://github.com/pytorch/pytorch/issues/48684
#include <ATen/NativeFunctions.h>

namespace at {
namespace indexing {

21 changes: 21 additions & 0 deletions aten/src/ATen/TensorMeta.h
@@ -3,8 +3,24 @@
#include <ATen/ATen.h> // TODO: improve
// #include <ATen/NativeFunctions.h>

#include <ATen/DimVector.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Dimname.h>

namespace at {

namespace impl {

struct MetaBase {
virtual void set_output(int64_t output_idx, IntArrayRef sizes, IntArrayRef strides, TensorOptions options, DimnameList names) = 0;
void set_output(IntArrayRef sizes, TensorOptions options) {
set_output(0, sizes, {}, options, {});
}
virtual ~MetaBase() {}
};

} // namespace impl

struct TensorMeta {
DimVector sizes;
// TODO: DimVector strides;
@@ -14,6 +30,11 @@ struct TensorMeta {
: sizes(_sizes), options(_options) {}
};

inline Tensor meta_tensor_from_meta(const TensorMeta& meta) {
// TODO: eliminate indirection
return at::empty_meta(meta.sizes, meta.options);
}

inline Tensor tensor_from_meta(const TensorMeta& meta) {
// TODO: eliminate indirection
return at::empty(meta.sizes, meta.options);
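Note: the new at::impl::MetaBase interface only declares how outputs are described; what set_output actually does is left to the subclass. A hypothetical consumer, not part of this commit, that eagerly allocates each declared output using nothing beyond the header above:

#include <ATen/ATen.h>
#include <ATen/TensorMeta.h>
#include <iostream>
#include <vector>

// Hypothetical subclass: materialize every declared output with at::empty.
// Strides and names are ignored here; a real implementation would honor both.
struct EagerMeta final : public at::impl::MetaBase {
  std::vector<at::Tensor> outputs;

  // Keep the two-argument convenience overload from the base visible.
  using at::impl::MetaBase::set_output;

  void set_output(int64_t output_idx, at::IntArrayRef sizes, at::IntArrayRef /*strides*/,
                  at::TensorOptions options, at::DimnameList /*names*/) override {
    if (static_cast<size_t>(output_idx) >= outputs.size()) {
      outputs.resize(output_idx + 1);
    }
    outputs[output_idx] = at::empty(sizes, options);
  }
};

int main() {
  EagerMeta meta;
  // The convenience overload forwards to set_output(0, sizes, {}, options, {}).
  meta.set_output({2, 3}, at::TensorOptions().dtype(at::kFloat));
  std::cout << meta.outputs[0].sizes() << std::endl;  // prints [2, 3]
  return 0;
}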
11 changes: 11 additions & 0 deletions aten/src/ATen/Version.cpp
@@ -185,4 +185,15 @@ std::string show_config() {
return ss.str();
}

std::string get_cxx_flags() {
#if defined(FBCODE_CAFFE2)
TORCH_CHECK(
false,
"Buck does not populate the `CXX_FLAGS` field of Caffe2 build options. "
"As a result, `get_cxx_flags` is OSS only."
);
#endif
return caffe2::GetBuildOptions().at("CXX_FLAGS");
}

}
2 changes: 2 additions & 0 deletions aten/src/ATen/Version.h
@@ -11,4 +11,6 @@ CAFFE2_API std::string get_mkldnn_version();

CAFFE2_API std::string get_openmp_version();

CAFFE2_API std::string get_cxx_flags();

} // namespace at
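Note: a minimal use of the new accessor (an illustration, not part of this commit). In OSS builds it returns the recorded CXX_FLAGS; fbcode/Buck builds hit the TORCH_CHECK in Version.cpp above instead.

#include <ATen/Version.h>
#include <iostream>

int main() {
  // Prints the compiler flags recorded at build time.
  std::cout << at::get_cxx_flags() << std::endl;
  return 0;
}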
9 changes: 9 additions & 0 deletions aten/src/ATen/cuda/CublasHandlePool.cpp
@@ -50,6 +50,15 @@ cublasHandle_t getCurrentCUDABlasHandle() {
} else {
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
}
#endif
#if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION >= 308
rocblas_atomics_mode rocblas_mode;
if (at::globalContext().deterministic()) {
rocblas_mode = rocblas_atomics_not_allowed;
} else {
rocblas_mode = rocblas_atomics_allowed;
}
TORCH_CUDABLAS_CHECK(rocblas_set_atomics_mode(handle, rocblas_mode));
#endif
return handle;
}
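Note: a sketch of how the new ROCm branch is exercised (an illustration, not part of this commit). It only uses calls visible in the hunk above; the global flag it reads is the one the user-facing torch.set_deterministic toggled at the time.

#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>

void report_rocblas_atomics_mode() {
  const bool det = at::globalContext().deterministic();
  // On HIP builds with HIP_VERSION >= 308, the handle returned below now has
  // rocblas_atomics_not_allowed applied when `det` is true and
  // rocblas_atomics_allowed otherwise.
  auto handle = at::cuda::getCurrentCUDABlasHandle();
  (void)handle;  // the atomics mode is configured as a side effect of the call above
}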
