Skip to content

Commit

Permalink
Update on "Properly check that reduction strings are valid for l1_loss, smoothl1_loss, and mse_loss."

Browse files Browse the repository at this point in the history

Differential Revision: [D23306786](https://our.internmc.facebook.com/intern/diff/D23306786)
  • Loading branch information
gchanan committed Aug 30, 2020
2 parents 42269a8 + 86199dc commit 24b31ac
Show file tree
Hide file tree
Showing 321 changed files with 42,263 additions and 2,283 deletions.
4 changes: 3 additions & 1 deletion .circleci/cimodel/data/pytorch_build_data.py
Expand Up @@ -5,7 +5,9 @@
("xenial", [
("rocm", [
("3.5.1", [
X("3.6"),
("3.6", [
('build_only', [XImportant(True)]),
]),
]),
]),
("gcc", [
Expand Down
2 changes: 1 addition & 1 deletion .circleci/cimodel/data/pytorch_build_definitions.py
Expand Up @@ -307,7 +307,7 @@ def instantiate_configs():
parallel_backend = fc.find_prop("parallel_backend") or None
build_only = fc.find_prop("build_only") or False
is_coverage = fc.find_prop("is_coverage") or False
if build_only and restrict_phases is None:
if build_only:
restrict_phases = ["build"]
if is_coverage and restrict_phases is None:
restrict_phases = ["build", "coverage_test"]
Expand Down
5 changes: 0 additions & 5 deletions .circleci/cimodel/data/simple/mobile_definitions.py
Expand Up @@ -57,11 +57,6 @@ def gen_tree(self):
[DOCKER_REQUIREMENT_ASAN],
["build"]
),
MobileJob(
DOCKER_IMAGE_ASAN,
[DOCKER_REQUIREMENT_ASAN],
["custom", "build", "static"]
),

# Use LLVM-DEV toolchain in android-ndk-r19c docker image
MobileJob(
Expand Down
52 changes: 0 additions & 52 deletions .circleci/config.yml
Expand Up @@ -6120,54 +6120,9 @@ workflows:
name: pytorch_linux_xenial_rocm3_5_1_py3_6_build
requires:
- "docker-pytorch-linux-xenial-rocm3.5.1-py3.6"
filters:
branches:
only:
- master
- /ci-all\/.*/
- /release\/.*/
build_environment: "pytorch-linux-xenial-rocm3.5.1-py3.6-build"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-rocm3.5.1-py3.6"
resource_class: xlarge
- pytorch_linux_test:
name: pytorch_linux_xenial_rocm3_5_1_py3_6_test1
requires:
- pytorch_linux_xenial_rocm3_5_1_py3_6_build
filters:
branches:
only:
- master
- /ci-all\/.*/
- /release\/.*/
build_environment: "pytorch-linux-xenial-rocm3.5.1-py3.6-test1"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-rocm3.5.1-py3.6"
resource_class: pytorch/amd-gpu
- pytorch_linux_test:
name: pytorch_linux_xenial_rocm3_5_1_py3_6_test2
requires:
- pytorch_linux_xenial_rocm3_5_1_py3_6_build
filters:
branches:
only:
- master
- /ci-all\/.*/
- /release\/.*/
build_environment: "pytorch-linux-xenial-rocm3.5.1-py3.6-test2"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-rocm3.5.1-py3.6"
resource_class: pytorch/amd-gpu
- pytorch_linux_test:
name: pytorch_linux_xenial_rocm3_5_1_py3_6_caffe2_test
requires:
- pytorch_linux_xenial_rocm3_5_1_py3_6_build
filters:
branches:
only:
- master
- /ci-all\/.*/
- /release\/.*/
build_environment: "pytorch-linux-xenial-rocm3.5.1-py3.6-caffe2_test"
docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-rocm3.5.1-py3.6"
resource_class: pytorch/amd-gpu
- pytorch_linux_build:
name: pytorch_linux_xenial_py3_6_gcc5_4_build
requires:
Expand Down Expand Up @@ -6652,13 +6607,6 @@ workflows:
name: pytorch_linux_xenial_py3_clang5_mobile_build
requires:
- docker-pytorch-linux-xenial-py3-clang5-asan
- pytorch_linux_build:
build_environment: pytorch-linux-xenial-py3-clang5-mobile-custom-build-static
build_only: "1"
docker_image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan
name: pytorch_linux_xenial_py3_clang5_mobile_custom_build_static
requires:
- docker-pytorch-linux-xenial-py3-clang5-asan
- pytorch_linux_build:
build_environment: pytorch-linux-xenial-py3-clang5-mobile-custom-build-dynamic
build_only: "1"
Expand Down
3 changes: 0 additions & 3 deletions .circleci/docker/build.sh
Expand Up @@ -160,7 +160,6 @@ case "$image" in
KATEX=yes
;;
pytorch-linux-xenial-cuda11.0-cudnn8-py3-gcc7)
UBUNTU_VERSION=16.04-rc
CUDA_VERSION=11.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.6
Expand Down Expand Up @@ -230,7 +229,6 @@ case "$image" in
VISION=yes
;;
pytorch-linux-bionic-cuda11.0-cudnn8-py3.6-gcc9)
UBUNTU_VERSION=18.04-rc
CUDA_VERSION=11.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.6
Expand All @@ -241,7 +239,6 @@ case "$image" in
KATEX=yes
;;
pytorch-linux-bionic-cuda11.0-cudnn8-py3.8-gcc9)
UBUNTU_VERSION=18.04-rc
CUDA_VERSION=11.0
CUDNN_VERSION=8
ANACONDA_PYTHON_VERSION=3.8
Expand Down
2 changes: 2 additions & 0 deletions .circleci/docker/common/install_conda.sh
Expand Up @@ -86,6 +86,8 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
conda_install magma-cuda101 -c pytorch
elif [[ "$CUDA_VERSION" == 10.2* ]]; then
conda_install magma-cuda102 -c pytorch
elif [[ "$CUDA_VERSION" == 11.0* ]]; then
conda_install magma-cuda110 -c pytorch
fi

# TODO: This isn't working atm
Expand Down
4 changes: 2 additions & 2 deletions .circleci/scripts/binary_upload.sh
Expand Up @@ -30,7 +30,7 @@ do_backup() {
(
pushd /tmp/workspace
set -x
${AWS_S3_CP} --recursive . "${BACKUP_BUCKET}/${CIRCLE_TAG}/${backup_dir}"
${AWS_S3_CP} --recursive . "${BACKUP_BUCKET}/${CIRCLE_TAG}/${backup_dir}/"
)
}

Expand All @@ -52,7 +52,7 @@ s3_upload() {
local pkg_type
extension="$1"
pkg_type="$2"
s3_dir="${UPLOAD_BUCKET}/${pkg_type}/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}"
s3_dir="${UPLOAD_BUCKET}/${pkg_type}/${UPLOAD_CHANNEL}/${UPLOAD_SUBFOLDER}/"
(
for pkg in ${PKG_DIR}/*.${extension}; do
(
Expand Down
4 changes: 1 addition & 3 deletions .jenkins/pytorch/build-mobile.sh
Expand Up @@ -22,9 +22,7 @@ retry pip install --pre torch torchvision \

# Run end-to-end process of building mobile library, linking into the predictor
# binary, and running forward pass with a real model.
if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-static* ]]; then
TEST_CUSTOM_BUILD_STATIC=1 test/mobile/custom_build/build.sh
elif [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
export LLVM_DIR="$(llvm-config-5.0 --prefix)"
echo "LLVM_DIR: ${LLVM_DIR}"
TEST_CUSTOM_BUILD_DYNAMIC=1 test/mobile/custom_build/build.sh
Expand Down
30 changes: 14 additions & 16 deletions BUILD.bazel
Expand Up @@ -2,7 +2,7 @@ load("@bazel_skylib//lib:paths.bzl", "paths")
load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_proto_library", "cc_test")
load("//third_party:substitution.bzl", "template_rule")
load("//third_party:substitution.bzl", "header_template_rule")
load("//:tools/build_variables.bzl", "torch_cpp_srcs", "libtorch_python_core_sources", "libtorch_core_sources", "libtorch_distributed_sources", "libtorch_extra_sources", "jit_core_sources")
load("//tools/rules:cu.bzl", "cu_library")
load("//tools/config:defs.bzl", "if_cuda")
Expand All @@ -27,19 +27,18 @@ COMMON_COPTS = [
])

# c10
template_rule(
header_template_rule(
name = "cmake_macros_h",
src = "c10/macros/cmake_macros.h.in",
out = "c10/macros/cmake_macros.h",
substitutions = {
"cmakedefine": "define",
"#define FEATURE_TORCH_MOBILE": "/* #undef FEATURE_TORCH_MOBILE */",
"#define USE_STATIC_DISPATCH": "/* #undef USE_STATIC_DISPATCH */",
"#define C10_USE_NUMA": "/* #undef C10_USE_NUMA */",
},
)

template_rule(
header_template_rule(
name = "cuda_cmake_macros_h",
src = "c10/cuda/impl/cuda_cmake_macros.h.in",
out = "c10/cuda/impl/cuda_cmake_macros.h",
Expand All @@ -58,13 +57,12 @@ cc_library(
"c10/macros/*.h",
"c10/util/*.h",
"c10/util/*.hpp",
]) + [
"c10/macros/cmake_macros.h",
"c10/cuda/impl/cuda_cmake_macros.h",
],
]),
deps = [
"@com_github_gflags_gflags//:gflags",
"@com_github_glog//:glog",
":cmake_macros_h",
":cuda_cmake_macros_h",
],
)

Expand Down Expand Up @@ -531,7 +529,7 @@ filegroup(
],
)

template_rule(
header_template_rule(
name = "aten_src_ATen_config",
src = "aten/src/ATen/Config.h.in",
out = "aten/src/ATen/Config.h",
Expand All @@ -547,7 +545,7 @@ template_rule(
},
)

template_rule(
header_template_rule(
name = "aten_src_ATen_cuda_config",
src = "aten/src/ATen/cuda/CUDAConfig.h.in",
out = "aten/src/ATen/cuda/CUDAConfig.h",
Expand All @@ -558,7 +556,7 @@ template_rule(
},
)

template_rule(
header_template_rule(
name = "aten_src_TH_THGeneral",
src = "aten/src/TH/THGeneral.h.in",
out = "aten/src/TH/THGeneral.h",
Expand All @@ -570,7 +568,7 @@ template_rule(
},
)

template_rule(
header_template_rule(
name = "aten_src_THC_THCGeneral",
src = "aten/src/THC/THCGeneral.h.in",
out = "aten/src/THC/THCGeneral.h",
Expand All @@ -582,8 +580,6 @@ template_rule(
cc_library(
name = "aten_headers",
hdrs = [
"aten/src/TH/THGeneral.h",
"aten/src/THC/THCGeneral.h",
"torch/csrc/WindowsTorchApiMacro.h",
"torch/csrc/jit/frontend/function_schema_parser.h",
] + glob([
Expand All @@ -605,6 +601,8 @@ cc_library(
],
deps = [
":c10_headers",
":aten_src_TH_THGeneral",
":aten_src_THC_THCGeneral",
],
)

Expand Down Expand Up @@ -766,7 +764,7 @@ cc_proto_library(
deps = [":caffe2_proto_source"],
)

template_rule(
header_template_rule(
name = "caffe2_core_macros_h",
src = "caffe2/core/macros.h.in",
out = "caffe2/core/macros.h",
Expand Down Expand Up @@ -1586,7 +1584,6 @@ filegroup(
cc_library(
name = "caffe2_for_aten_headers",
hdrs = [
"caffe2/core/macros.h",
"caffe2/core/common.h",
"caffe2/core/logging.h",
"caffe2/core/types.h",
Expand All @@ -1604,6 +1601,7 @@ cc_library(
deps = [
":c10_headers",
":caffe2_protos",
":caffe2_core_macros_h",
],
)

Expand Down
1 change: 0 additions & 1 deletion CMakeLists.txt
Expand Up @@ -124,7 +124,6 @@ option(BUILD_PYTHON "Build Python binaries" ON)
option(BUILD_CAFFE2_OPS "Build Caffe2 operators" ON)
option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
option(BUILD_CAFFE2_MOBILE "Build libcaffe2 for mobile (deprecating)" OFF)
option(USE_STATIC_DISPATCH "Use static dispatch for ATen operators" OFF)
cmake_dependent_option(
CAFFE2_LINK_LOCAL_PROTOBUF "If set, build protobuf inside libcaffe2.so." ON
"BUILD_SHARED_LIBS AND BUILD_CUSTOM_PROTOBUF" OFF)
Expand Down

0 comments on commit 24b31ac

Please sign in to comment.