Commit e4dac20: Merge pull request pytorch#452 from iotamudelta/ifu_20190812

IFU 20190812

iotamudelta committed Aug 29, 2019
2 parents f6c5a58 + 863cfe4
Showing 314 changed files with 8,892 additions and 4,065 deletions.
1 change: 1 addition & 0 deletions .circleci/cimodel/data/pytorch_build_data.py
@@ -49,6 +49,7 @@
]),
("9.2", [X("3.6")]),
("10", [X("3.6")]),
("10.1", [X("3.6")]),
]),
("android", [
("r19c", [XImportant("3.6")]),
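Aside: cimodel encodes the CI matrix as a nested tuple tree, where each node pairs a CUDA version with the Python versions built against it. A minimal sketch of how an entry such as ("10.1", [X("3.6")]) fans out into concrete build configurations — simplified and hypothetical, not the real cimodel API (X/XImportant carry extra metadata in the real file):

cuda_tree = [
    ("9.2", ["3.6"]),
    ("10", ["3.6"]),
    ("10.1", ["3.6"]),  # the entry this commit adds
]

def expand(tree):
    # Fan the tree out into flat (cuda_version, python_version) configs.
    return [(cuda, py) for cuda, pys in tree for py in pys]

print(expand(cuda_tree))
# [('9.2', '3.6'), ('10', '3.6'), ('10.1', '3.6')]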
4 changes: 2 additions & 2 deletions .circleci/cimodel/data/pytorch_build_definitions.py
@@ -14,7 +14,7 @@

DOCKER_IMAGE_PATH_BASE = "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/"

-DOCKER_IMAGE_VERSION = 323
+DOCKER_IMAGE_VERSION = 327


@dataclass
@@ -222,7 +222,7 @@ def instantiate_configs():
if compiler_name == "clang":
parms_list.append("asan")

if cuda_version in ["9.2", "10"]:
if cuda_version in ["9.2", "10", "10.1"]:
# TODO The gcc version is orthogonal to CUDA version?
parms_list.append("gcc7")

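The gcc7 branch above feeds parms_list, which is later joined into a build-environment name. A rough, hypothetical sketch of that assembly (the real instantiate_configs() carries far more state; names here are illustrative only):

def build_name(distro, cuda_version, python_version, extra_parms):
    # Assemble a build-environment name from its parts, roughly the way
    # the cimodel config turns parms_list into a job name.
    parts = ["pytorch", distro]
    if cuda_version is not None:
        parts.append("cuda" + cuda_version)
    parts.append("py" + python_version)
    parts.extend(extra_parms)
    return "-".join(parts)

# With this commit, CUDA 10.1 joins 9.2 and 10 in getting the gcc7 suffix:
print(build_name("linux-xenial", "10.1", "3.6", ["gcc7"]))
# pytorch-linux-xenial-cuda10.1-py3.6-gcc7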
113 changes: 73 additions & 40 deletions .circleci/config.yml

Large diffs are not rendered by default.

2 changes: 0 additions & 2 deletions .circleci/scripts/binary_checkout.sh
@@ -41,8 +41,6 @@ popd
# Clone the Builder master repo
git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
pushd "$BUILDER_ROOT"
-git fetch origin
-git reset origin/master --hard
echo "Using builder from "
git --no-pager log --max-count 1
popd
2 changes: 1 addition & 1 deletion .circleci/scripts/binary_linux_test.sh
@@ -26,7 +26,7 @@ pkg="/final_pkgs/\$(ls /final_pkgs)"
if [[ "$PACKAGE_TYPE" == conda ]]; then
conda install -y "\$pkg" --offline
if [[ "$DESIRED_CUDA" == 'cpu' ]]; then
-conda install -y cpu-only -c pytorch
+conda install -y cpuonly -c pytorch
fi
retry conda install -yq future numpy protobuf six
if [[ "$DESIRED_CUDA" != 'cpu' ]]; then
2 changes: 1 addition & 1 deletion .circleci/scripts/binary_linux_upload.sh
@@ -26,7 +26,7 @@ pushd /home/circleci/project/final_pkgs
if [[ "$PACKAGE_TYPE" == conda ]]; then
retry conda install -yq anaconda-client
retry timeout 30 /home/circleci/project/login_to_anaconda.sh
-anaconda upload "$(ls)" -u pytorch --label main --no-progress --force
+anaconda upload "$(ls)" -u pytorch-nightly --label main --no-progress --force
elif [[ "$PACKAGE_TYPE" == libtorch ]]; then
retry pip install -q awscli
s3_dir="s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
2 changes: 1 addition & 1 deletion .circleci/scripts/binary_macos_upload.sh
@@ -26,7 +26,7 @@ pushd "$workdir/final_pkgs"
if [[ "$PACKAGE_TYPE" == conda ]]; then
retry conda install -yq anaconda-client
retry /Users/distiller/project/login_to_anaconda.sh
-retry anaconda upload "$(ls)" -u pytorch --label main --no-progress --force
+retry anaconda upload "$(ls)" -u pytorch-nightly --label main --no-progress --force
elif [[ "$PACKAGE_TYPE" == libtorch ]]; then
retry pip install -q awscli
s3_dir="s3://pytorch/libtorch/${PIP_UPLOAD_FOLDER}${DESIRED_CUDA}/"
11 changes: 6 additions & 5 deletions .circleci/scripts/binary_populate_env.sh
@@ -52,10 +52,10 @@ fi

# We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
export DATE="$(date -u +%Y%m%d)"
if [[ "$(uname)" == 'Darwin' ]] || [[ "$DESIRED_CUDA" == "cu100" ]]; then
export PYTORCH_BUILD_VERSION="1.2.0.dev$DATE"
if [[ "$(uname)" == 'Darwin' ]] || [[ "$DESIRED_CUDA" == "cu100" ]] || [[ "$PACKAGE_TYPE" == conda ]]; then
export PYTORCH_BUILD_VERSION="1.3.0.dev$DATE"
else
export PYTORCH_BUILD_VERSION="1.2.0.dev$DATE+$DESIRED_CUDA"
export PYTORCH_BUILD_VERSION="1.3.0.dev$DATE+$DESIRED_CUDA"
fi
export PYTORCH_BUILD_NUMBER=1

Expand All @@ -72,12 +72,13 @@ export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
export DESIRED_DEVTOOLSET="$DESIRED_DEVTOOLSET"
export DATE="$DATE"
-export NIGHTLIES_DATE_PREAMBLE=1.2.0.dev
+export NIGHTLIES_DATE_PREAMBLE=1.3.0.dev
export PYTORCH_BUILD_VERSION="$PYTORCH_BUILD_VERSION"
export PYTORCH_BUILD_NUMBER="$PYTORCH_BUILD_NUMBER"
export OVERRIDE_PACKAGE_VERSION="$PYTORCH_BUILD_VERSION"
-export TORCH_PACKAGE_NAME='torch-nightly'
+# TODO: We don't need this anymore IIUC
+export TORCH_PACKAGE_NAME='torch'
export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'
export USE_FBGEMM=1
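For illustration, the version logic above rendered in Python: nightly versions are date-stamped, and only non-conda, non-macOS, non-cu100 builds append the CUDA variant as a PEP 440 local version suffix. A sketch with simplified inputs, not the actual build tooling:

from datetime import datetime, timezone

def nightly_version(base="1.3.0", desired_cuda="cu101",
                    platform="Linux", package_type="manywheel"):
    # Mirrors the shell logic above: macOS builds, cu100 builds, and all
    # conda packages get a bare dev version; everything else appends the
    # CUDA variant as a PEP 440 local version ("+cu101").
    date = datetime.now(timezone.utc).strftime("%Y%m%d")
    version = f"{base}.dev{date}"
    if platform == "Darwin" or desired_cuda == "cu100" or package_type == "conda":
        return version
    return f"{version}+{desired_cuda}"

print(nightly_version())                      # e.g. 1.3.0.dev20190829+cu101
print(nightly_version(package_type="conda"))  # e.g. 1.3.0.dev20190829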
6 changes: 3 additions & 3 deletions .circleci/verbatim-sources/job-specs-custom.yml
@@ -1,7 +1,7 @@
pytorch_short_perf_test_gpu:
environment:
BUILD_ENVIRONMENT: pytorch-short-perf-test-gpu
-DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:323"
+DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:327"
PYTHON_VERSION: "3.6"
USE_CUDA_DOCKER_RUNTIME: "1"
resource_class: gpu.medium
@@ -42,7 +42,7 @@
environment:
BUILD_ENVIRONMENT: pytorch-python-doc-push
# TODO: stop hardcoding this
-DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:323"
+DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:327"
resource_class: large
machine:
image: ubuntu-1604:201903-01
@@ -92,7 +92,7 @@
pytorch_cpp_doc_push:
environment:
BUILD_ENVIRONMENT: pytorch-cpp-doc-push
-DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:323"
+DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda9-cudnn7-py3:327"
resource_class: large
machine:
image: ubuntu-1604:201903-01
Empty file removed .github/CONTRIBUTING.md
58 changes: 0 additions & 58 deletions .travis.yml

This file was deleted.

10 changes: 9 additions & 1 deletion CMakeLists.txt
@@ -10,6 +10,12 @@ cmake_policy(SET CMP0025 NEW)
# ---[ Project and semantic versioning.
project(Caffe2 CXX C)

+if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+set(LINUX TRUE)
+else()
+set(LINUX FALSE)
+endif()
+
set(CMAKE_INSTALL_MESSAGE NEVER)

set(CMAKE_CXX_STANDARD 11)
@@ -121,7 +127,9 @@ cmake_dependent_option(
"USE_NCCL" OFF)
option(USE_NNAPI "Use NNAPI" OFF)
option(USE_NNPACK "Use NNPACK" ON)
-option(USE_NUMA "Use NUMA (only available on Linux)" ON)
+cmake_dependent_option(
+USE_NUMA "Use NUMA. Only available on Linux." ON
+"LINUX" OFF)
cmake_dependent_option(
USE_NVRTC "Use NVRTC. Only available if USE_CUDA is on." OFF
"USE_CUDA" OFF)
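Background on the CMake change: cmake_dependent_option() exposes an option only while its condition holds and forces a fallback value otherwise, so after this commit USE_NUMA can no longer default to ON on non-Linux hosts. A toy Python model of those semantics (an illustration, not CMake itself):

def cmake_dependent_option(user_value, default, condition, fallback):
    # Toy model of CMake's cmake_dependent_option(): while <condition>
    # holds, the option behaves normally (user setting if given, else the
    # default); when it does not hold, the option is forced to <fallback>.
    if not condition:
        return fallback
    return default if user_value is None else user_value

# USE_NUMA defaults ON, but is forced OFF when not building on Linux:
print(cmake_dependent_option(None, True, condition=True, fallback=False))   # True
print(cmake_dependent_option(True, True, condition=False, fallback=False))  # False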
2 changes: 1 addition & 1 deletion aten/src/ATen/ATen.h
@@ -17,13 +17,13 @@
#include <ATen/Tensor.h>
#include <ATen/TensorGeometry.h>
#include <ATen/TensorOperators.h>
+#include <ATen/TensorOptions.h>
#include <ATen/Version.h>
#include <ATen/core/ATenGeneral.h>
#include <ATen/core/Generator.h>
#include <c10/core/Layout.h>
#include <ATen/core/Scalar.h>
#include <c10/core/Storage.h>
-#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>
#include <c10/util/Exception.h>
#include <ATen/core/ATenDispatch.h>
2 changes: 1 addition & 1 deletion aten/src/ATen/Context.cpp
@@ -2,7 +2,7 @@

#include <ATen/Context.h>

-#include <c10/core/TensorOptions.h>
+#include <ATen/core/TensorOptions.h>

#include <thread>
#include <mutex>
103 changes: 0 additions & 103 deletions aten/src/ATen/Declarations.cwrap
@@ -1768,40 +1768,6 @@
- THTensor* self
- THTensor* self
]]
-[[
-name: _th_atan2
-types:
-- floating_point
-backends:
-- CPU
-- CUDA
-variants:
-- function
-cname: atan2
-return: argument 0
-arguments:
-- arg: THTensor* result
-output: True
-- arg: THTensor* self
-broadcast: other fallback
-- THTensor* other
-]]
-[[
-name: _th_atan2_
-types:
-- floating_point
-backends:
-- CPU
-- CUDA
-cname: atan2
-variants: function
-return: argument 0
-arguments:
-- THTensor* self
-- arg: THTensor* self
-broadcast: other fallback inplace
-- THTensor* other
-]]
[[
name: _th_pow
cname: pow
@@ -2355,75 +2321,6 @@
- THTensor* batch1
- THTensor* batch2
]]
-[[
-name: _th_addcmul
-cname: addcmul
-variants:
-- function
-backends:
-- CUDA
-return: argument 0
-arguments:
-- arg: THTensor* result
-output: True
-- arg: THTensor* self
-broadcast: tensor1,tensor2 fallback
-- arg: real value
-default: AS_REAL(1)
-kwarg_only: True
-- THTensor* tensor1
-- THTensor* tensor2
-]]
-[[
-name: _th_addcmul_
-options:
-- cname: addcmul
-variants: function
-backends:
-- CUDA
-return: argument 0
-arguments:
-- THTensor* self
-- arg: THTensor* self
-broadcast: tensor1,tensor2 inplace fallback
-- arg: real value
-default: AS_REAL(1)
-kwarg_only: True
-- THTensor* tensor1
-- THTensor* tensor2
-]]
-[[
-name: _th_addcdiv
-cname: addcdiv
-variants:
-- function
-return: argument 0
-arguments:
-- arg: THTensor* result
-output: True
-- arg: THTensor* self
-broadcast: tensor1,tensor2 fallback
-- arg: real value
-default: AS_REAL(1)
-kwarg_only: True
-- THTensor* tensor1
-- THTensor* tensor2
-]]
-[[
-name: _th_addcdiv_
-cname: addcdiv
-variants: function
-return: argument 0
-arguments:
-- THTensor* self
-- arg: THTensor* self
-broadcast: tensor1,tensor2 inplace fallback
-- arg: real value
-default: AS_REAL(1)
-kwarg_only: True
-- THTensor* tensor1
-- THTensor* tensor2
-]]
[[
name: _th_gels
cname: gels
6 changes: 3 additions & 3 deletions aten/src/ATen/Dispatch.h
@@ -33,7 +33,7 @@ struct ScalarTypeToCType<at::ScalarType::Half> {
// due to ambiguous reference which can't to be resolved. For some reason it cant pick between at::detail and at::cuda::detail.
// For repro example, please see: https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba
// TODO: remove once the bug is fixed.
-static at::Half t;
+static type t;
};

template<>
@@ -44,7 +44,7 @@ struct ScalarTypeToCType<at::ScalarType::BFloat16> {
// due to ambiguous reference which can't to be resolved. For some reason it cant pick between at::detail and at::cuda::detail.
// For repro example, please see: https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba
// TODO: remove once the bug is fixed.
-static at::BFloat16 t;
+static type t;
};

template<>
@@ -55,7 +55,7 @@ struct ScalarTypeToCType<at::ScalarType::Bool> {
// due to ambiguous reference which can't to be resolved. For some reason it cant pick between at::detail and at::cuda::detail.
// For repro example, please see: https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba
// TODO: remove once the bug is fixed.
-static bool t;
+static type t;
};

template<>
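The Dispatch.h fix is a single-source-of-truth cleanup: each ScalarTypeToCType specialization already defines a member alias type, and the workaround declaration now uses that alias rather than restating the concrete type, so the two cannot drift apart. A loose Python analogue of the same idea (illustrative only, not PyTorch code):

# One authoritative mapping from scalar-type name to a stand-in type.
# (Hypothetical stand-ins; the real table is the C++ ScalarTypeToCType trait.)
SCALAR_TYPE_TO_CTYPE = {
    "Half": float,   # stand-in for at::Half
    "Bool": bool,
}

def dummy_instance(scalar_type):
    # Like `static type t;` after the fix: the dummy is derived from the
    # mapping itself, so it cannot fall out of sync with it, whereas
    # hard-coding float here would reintroduce the duplication this
    # commit removes.
    return SCALAR_TYPE_TO_CTYPE[scalar_type]()

print(type(dummy_instance("Half")))  # <class 'float'>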
2 changes: 1 addition & 1 deletion aten/src/ATen/InitialTensorOptions.h
@@ -1,6 +1,6 @@
#pragma once

-#include <c10/core/TensorOptions.h>
+#include <ATen/core/TensorOptions.h>

namespace at {

2 changes: 1 addition & 1 deletion aten/src/ATen/TensorOptions.h
@@ -1,2 +1,2 @@
#pragma once
-#include <c10/core/TensorOptions.h>
+#include <ATen/core/TensorOptions.h>
2 changes: 1 addition & 1 deletion aten/src/ATen/core/DeprecatedTypeProperties.h
@@ -3,7 +3,7 @@
#include <c10/core/Backend.h>
#include <c10/core/ScalarType.h>
#include <c10/core/Layout.h>
-#include <c10/core/TensorOptions.h>
+#include <ATen/core/TensorOptions.h>
#include <c10/core/Storage.h>
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
#include <ATen/core/Generator.h>
