Commit 6eef9c2

Merge pull request #118 from iotamudelta/master

Merge from upstream

iotamudelta committed Aug 13, 2018
2 parents 5e2bc5a + 77f497c
Showing 97 changed files with 3,042 additions and 4,220 deletions.
2 changes: 1 addition & 1 deletion .jenkins/caffe2/build.sh
@@ -217,7 +217,7 @@ if [[ -z "$INTEGRATED" ]]; then
 else
-  sudo FULL_CAFFE2=1 python setup.py install
+  FULL_CAFFE2=1 python setup.py install --user
   # TODO: I'm not sure why this is necessary
   cp -r torch/lib/tmp_install $INSTALL_PREFIX
5 changes: 0 additions & 5 deletions CMakeLists.txt
@@ -288,11 +288,6 @@ include_directories(BEFORE ${PROJECT_BINARY_DIR})
 
 include_directories(BEFORE ${PROJECT_SOURCE_DIR}/aten/src/)
 
-# ---[ Old caffe protobuf
-if(BUILD_CAFFE2)
-  add_subdirectory(caffe/proto)
-endif()
-
 # ---[ Main build
 add_subdirectory(caffe2)
1 change: 1 addition & 0 deletions aten/src/ATen/CMakeLists.txt
@@ -251,6 +251,7 @@ IF(USE_CUDA AND NOT USE_ROCM)
   ENDIF(USE_MAGMA)
   IF ($ENV{ATEN_STATIC_CUDA})
     list(APPEND ATen_CUDA_DEPENDENCY_LIBS "${CUDA_TOOLKIT_ROOT_DIR}/lib64/libculibos.a")
+    list(APPEND ATen_CUDA_DEPENDENCY_LIBS "${CUDA_TOOLKIT_ROOT_DIR}/lib64/libcudart_static.a")
   ENDIF($ENV{ATEN_STATIC_CUDA})
 ENDIF()
2 changes: 1 addition & 1 deletion aten/src/ATen/CPUApplyUtils.h
@@ -57,7 +57,7 @@ inline void _setup_arrays(Tensor& tensor, Arg* iter) {
   for (int64_t i = 0; i < max_dim; i++) {
     int64_t size = tensor.size(i);
     int64_t stride = tensor.stride(i);
-    while (i + 1 < max_dim &&
+    while (tensor.stride(i) > 0 && i + 1 < max_dim &&
            (tensor.size(i + 1) == 1 ||
             tensor.stride(i) == tensor.size(i + 1) * tensor.stride(i + 1))) {
       size = size * tensor.size(i + 1);
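The added tensor.stride(i) > 0 guard stops the coalescing loop from folding zero-stride (broadcast/expanded) dimensions into a neighbor. A standalone sketch of the merge rule, with a hypothetical coalesce helper in place of the ATen internals:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Merge adjacent (size, stride) dims when the outer stride is positive
    // and equals size * stride of the inner dim; zero-stride dims stay
    // separate so broadcast views are never collapsed away.
    std::vector<std::pair<int64_t, int64_t>> coalesce(
        const std::vector<int64_t>& sizes,
        const std::vector<int64_t>& strides) {
      std::vector<std::pair<int64_t, int64_t>> out;
      const int64_t max_dim = static_cast<int64_t>(sizes.size());
      for (int64_t i = 0; i < max_dim; i++) {
        int64_t size = sizes[i];
        int64_t stride = strides[i];
        while (strides[i] > 0 && i + 1 < max_dim &&
               (sizes[i + 1] == 1 ||
                strides[i] == sizes[i + 1] * strides[i + 1])) {
          size = size * sizes[i + 1];
          if (sizes[i + 1] != 1) {
            stride = strides[i + 1];  // adopt the inner (smaller) stride
          }
          i++;
        }
        out.emplace_back(size, stride);
      }
      return out;
    }

    // e.g. sizes {4, 5}, strides {5, 1} -> {(20, 1)}        contiguous, merged
    //      sizes {4, 5}, strides {0, 1} -> {(4, 0), (5, 1)} broadcast dim kept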
6 changes: 3 additions & 3 deletions aten/src/ATen/Declarations.cwrap
@@ -58,7 +58,7 @@
     - THStorage* source
     - long storage_offset
     - IntListSize size
-    - arg: IntListStride stride
+    - arg: IntList stride
       default: {}
 ]]
 [[
@@ -3408,13 +3408,13 @@
   - cname: newWithSize
     arguments:
       - IntListSize size
-      - arg: IntListStride stride
+      - IntList stride
   - cname: newWithStorage
     arguments:
      - THStorage* storage
      - int64_t storageOffset
      - IntListSize size
-     - arg: IntListStride stride
+     - arg: IntList stride
        default: {}
 ]]
4 changes: 2 additions & 2 deletions aten/src/ATen/Device.cpp
@@ -75,12 +75,12 @@ Device::Device(const std::string& device_string) : Device(Type::CPU) {
   }
 }
 
-} // namespace at
-
 std::ostream& operator<<(std::ostream& stream, const at::Device& device) {
   stream << device.type();
   if (device.has_index()) {
     stream << ":" << device.index();
   }
   return stream;
 }
+
+} // namespace at
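A recurring change in this commit moves stream operators inside namespace at (here and in Device.h, Layout.h, and the DeviceType files below). A minimal sketch, with hypothetical demo names, of one common reason for that placement: argument-dependent lookup finds an operator<< declared in the operand's own namespace from any call site, so it cannot be shadowed by unrelated overloads in between.

    #include <iostream>

    namespace demo {
    struct Device { int index; };

    // Declared next to Device, so ADL finds it wherever Device is streamed.
    std::ostream& operator<<(std::ostream& stream, const Device& d) {
      return stream << "device:" << d.index;
    }
    }  // namespace demo

    int main() {
      demo::Device d{1};
      std::cout << d << "\n";  // resolved via argument-dependent lookup
      return 0;
    }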
4 changes: 3 additions & 1 deletion aten/src/ATen/Device.h
@@ -111,10 +111,12 @@ struct Device {
   DeviceType type_;
   int32_t index_ = -1;
 };
-} // namespace at
 
 AT_API std::ostream& operator<<(std::ostream& stream, const at::Device& device);
 
+} // namespace at
+
+
 namespace std {
 template<> struct hash<at::Device>
 {
3 changes: 2 additions & 1 deletion aten/src/ATen/Layout.h
@@ -20,7 +20,6 @@ inline Layout layout_from_backend(Backend backend) {
     return Layout::Strided;
   }
 }
-} // namespace at
 
 inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
   switch (layout) {
@@ -32,3 +31,5 @@ inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
     AT_ERROR("Unknown layout");
   }
 }
+
+} // namespace at
32 changes: 0 additions & 32 deletions aten/src/ATen/THSizeStrideCompat.h

This file was deleted.

3 changes: 3 additions & 0 deletions aten/src/ATen/TensorGeometry.cpp
@@ -5,6 +5,9 @@
 namespace at {
 
 bool TensorGeometry::is_contiguous() const {
+  if (numel_ == 0) {
+    return true;
+  }
   int64_t dim = sizes_.size();
   int64_t expected_stride = 1;
   for (int64_t i = dim - 1; i >= 0; i--) {
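The early return makes zero-element tensors contiguous by definition, whatever their strides. A standalone sketch of the whole check under that rule (a hypothetical free function over sizes/strides, not the ATen method itself):

    #include <cstdint>
    #include <vector>

    // Contiguous means each stride equals the product of all sizes to its
    // right; size-1 dims are exempt because their stride is never used.
    bool is_contiguous(const std::vector<int64_t>& sizes,
                       const std::vector<int64_t>& strides, int64_t numel) {
      if (numel == 0) {
        return true;  // no elements to address, so any stride layout is fine
      }
      int64_t expected_stride = 1;
      for (int64_t i = static_cast<int64_t>(sizes.size()) - 1; i >= 0; i--) {
        if (sizes[i] != 1 && strides[i] != expected_stride) {
          return false;
        }
        expected_stride *= sizes[i];
      }
      return true;
    }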
13 changes: 5 additions & 8 deletions aten/src/ATen/TensorGeometry.h
@@ -18,12 +18,14 @@ struct AT_API TensorGeometry {
       strides_[i] = expected_stride;
       expected_stride *= sizes_[i];
     }
+    numel_ = expected_stride;
   }
 
   explicit TensorGeometry(const Tensor& t)
     : sizes_(t.sizes().vec())
     , strides_(t.strides().vec())
-    , storage_offset_(t.storage_offset()) {}
+    , storage_offset_(t.storage_offset())
+    , numel_(t.numel()) {}
 
   // true if the tensor is contiguous
   bool is_contiguous() const;
@@ -43,13 +45,7 @@ struct AT_API TensorGeometry {
   }
   IntList strides() const { return IntList{ strides_ }; }
   int64_t storage_offset() const { return storage_offset_; }
-  int64_t numel() const {
-    int64_t r = 1;
-    for (auto s : sizes()) {
-      r *= s;
-    }
-    return r;
-  }
+  int64_t numel() const { return numel_; }
 
   TensorGeometry transpose(int64_t dim0, int64_t dim1) {
     TensorGeometry r = *this; // copy
@@ -63,6 +59,7 @@ struct AT_API TensorGeometry {
   std::vector<int64_t> sizes_;
   std::vector<int64_t> strides_;
   int64_t storage_offset_;
+  int64_t numel_;
 };
 
 } // namespace at
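With this change numel() returns a field computed once at construction instead of multiplying the sizes on every call. A minimal sketch of the caching pattern on a hypothetical standalone type:

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    struct Geometry {
      explicit Geometry(std::vector<int64_t> sizes)
          : sizes_(std::move(sizes)),
            numel_(std::accumulate(sizes_.begin(), sizes_.end(), int64_t{1},
                                   std::multiplies<int64_t>())) {}

      int64_t numel() const { return numel_; }  // O(1): no per-call loop

     private:
      std::vector<int64_t> sizes_;  // initialized before numel_ (decl order)
      int64_t numel_;
    };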
2 changes: 1 addition & 1 deletion aten/src/ATen/TensorUtils.cpp
@@ -118,7 +118,7 @@ void checkSameGPU(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
     oss << "Tensor for " << t2 << " is on CPU, ";
   }
   oss << "but expected " << ((!(t1->is_cuda() || t2->is_cuda())) ? "them" : "it")
-    << " to be on GPU (while checking arguments for " << c << ")";
+      << " to be on GPU (while checking arguments for " << c << ")";
   AT_ERROR(oss.str());
 }
 AT_CHECK(
4 changes: 2 additions & 2 deletions aten/src/ATen/core/DeviceType.cpp
@@ -34,9 +34,9 @@ std::string DeviceTypeName(at::DeviceType d, bool lower_case) {
   }
 }
 
-} // namespace at
-
 std::ostream& operator<<(std::ostream& stream, at::DeviceType type) {
   stream << at::DeviceTypeName(type, /* lower case */ true);
   return stream;
 }
+
+} // namespace at
4 changes: 2 additions & 2 deletions aten/src/ATen/core/DeviceType.h
@@ -27,6 +27,6 @@ AT_CORE_API std::string DeviceTypeName(
     at::DeviceType d,
     bool lower_case = false);
 
-} // namespace at
-
 AT_CORE_API std::ostream& operator<<(std::ostream& stream, at::DeviceType type);
+
+} // namespace at
4 changes: 4 additions & 0 deletions aten/src/ATen/cuda/detail/KernelUtils.h
@@ -1,4 +1,7 @@
 #pragma once
+
+#include "ATen/ATen.h"
+
 // Contents of this file are copied from THCUNN/common.h for the ease of porting
 // THCUNN functions into ATen.
 
@@ -14,6 +17,7 @@ constexpr int CUDA_NUM_THREADS = 1024;
 // CUDA: number of blocks for threads.
 inline int GET_BLOCKS(const int N)
 {
+  AT_ASSERTM(N > 0, "CUDA kernel launch blocks must be positive, but got N=", N);
   return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
 }
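GET_BLOCKS is ceiling division: the smallest number of CUDA_NUM_THREADS-sized blocks that covers N elements. The new assert rejects N <= 0, for which the formula would return zero blocks, an invalid launch configuration. A host-side sketch with a plain assert standing in for AT_ASSERTM:

    #include <cassert>

    constexpr int CUDA_NUM_THREADS = 1024;

    // Mirror of the header's helper: ceil(N / CUDA_NUM_THREADS).
    inline int GET_BLOCKS(const int N) {
      assert(N > 0);
      return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
    }

    // GET_BLOCKS(1)    == 1   one mostly idle block
    // GET_BLOCKS(1024) == 1   exactly one full block
    // GET_BLOCKS(1025) == 2   one element spills into a second block
    // Typical launch: kernel<<<GET_BLOCKS(N), CUDA_NUM_THREADS>>>(...), with
    // an `if (idx < N)` guard inside the kernel for the partial last block.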
19 changes: 13 additions & 6 deletions aten/src/ATen/cuda/detail/TensorInfo.cuh
@@ -11,6 +11,7 @@ namespace detail {
 // CUDA kernel argument that defines tensor layout
 template <typename T, typename IndexType>
 struct TensorInfo {
+  TensorInfo();
   TensorInfo(T* p,
              int dim,
              IndexType sz[MAX_TENSORINFO_DIMS],
@@ -47,6 +48,12 @@ struct TensorInfo {
   int dims;
 };
 
+template <typename T, typename IndexType>
+TensorInfo<T, IndexType>::TensorInfo() {
+  data = nullptr;
+  dims = 0;
+}
+
 template <typename T, typename IndexType>
 TensorInfo<T, IndexType>::TensorInfo(T* p,
                                      int dim,
@@ -73,7 +80,7 @@ template <typename T, typename IndexType>
 int
 TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
 
-  AT_CHECK(excludeDim >= -1 && excludeDim < dims,
+  AT_CHECK(excludeDim >= -1 && excludeDim < dims,
            "expected excluded dim between -1 and dims - 1");
 
   int stopDim = (excludeDim == -1) ? dims : excludeDim;
@@ -87,20 +94,20 @@ TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
     if (sizes[oldIndex] == 1) {
       continue;
     }
 
     ++newIndex;
     sizes[newIndex] = sizes[oldIndex];
     strides[newIndex] = strides[oldIndex];
     ++oldIndex;
-    break;
+    break;
   }
 
   // Collapses dims
   for (; oldIndex < stopDim; ++oldIndex) {
     if (sizes[oldIndex] == 1) {
       continue;
     }
 
     if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
       sizes[newIndex] *= sizes[oldIndex];
       strides[newIndex] = strides[oldIndex];
@@ -113,7 +120,7 @@ TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
 
   // Handles excludeDim being set (oldIndex == excludeDim)
   if (oldIndex != dims) {
 
     // Preserves excluded dimension
     ++newIndex;
     sizes[newIndex] = sizes[oldIndex];
@@ -146,7 +153,7 @@ struct IndexToOffset {
   static __host__ __device__ IndexType get(
       IndexType linearId,
      const TensorInfo<T, IndexType>& info) {
 
     IndexType offset = 0;
 
     // Uses static dims
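IndexToOffset, whose definition begins above, turns a flat linear index into a strided memory offset. A host-side sketch of that computation, as a hypothetical helper rather than the templated device version:

    #include <cstdint>

    // Peel coordinates off the linear index from the innermost dimension
    // outward and accumulate coordinate * stride for each dimension.
    int64_t index_to_offset(int64_t linear_id, int dims,
                            const int64_t* sizes, const int64_t* strides) {
      int64_t offset = 0;
      for (int i = dims - 1; i >= 0; --i) {
        int64_t coord = linear_id % sizes[i];
        linear_id /= sizes[i];
        offset += coord * strides[i];
      }
      return offset;
    }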
25 changes: 6 additions & 19 deletions aten/src/ATen/function_wrapper.py
@@ -211,7 +211,6 @@ def __init__(self, reason):
     'THStorage*': 'Storage &',
     'THGenerator*': 'Generator *',
     'IntListSize': 'IntList',
-    'IntListStride': 'IntList',
     'accreal': 'Scalar',
     'real': 'Scalar',
     'long': 'int64_t',
@@ -228,7 +227,6 @@ def __init__(self, reason):
     'THStorage*': 'Storage',
     'THGenerator*': 'Generator*',
     'IntListSize': 'IntList',
-    'IntListStride': 'IntList',
     'accreal': 'accreal',
     'real': 'real',
     'long': 'int64_t',
@@ -297,8 +295,6 @@ def __init__(self, reason):
     CodeTemplate(
         'check_generator<${Backend}Generator>(${arg_name}, &globalContext().defaultGenerator(backend()))'),
     # This is a cast done via direct-construction
-    'IntListSize': CodeTemplate('at::IntList ${result_name} = get_intlist_size_th(${arg_name});'),
-    'IntListStride': CodeTemplate('at::IntList ${result_name} = get_intlist_stride_th(${arg_name});'),
     'real': CodeTemplate('${arg_name}.to${ScalarName}()'),
     'accreal': CodeTemplate('${arg_name}.to${AccScalarName}()'),
     'TensorList': CodeTemplate(
@@ -308,8 +304,6 @@ def __init__(self, reason):
     'IntList': CodeTemplate('check_intlist<${size}>(${arg_name}, "${arg_name}", ${arg_pos}${,default_init})')
 }
 
-DIRECT_CONSTRUCTION_CHECKED_CAST = {'IntListSize', 'IntListStride'}
-
 CHECKED_USE = {
     'THTensor*': '{}_->tensor',
     'THSTensor*': '{}_->tensor',
@@ -1374,19 +1368,12 @@ def emit_body(env, option):
         if 'default_init' in arg:
             default_init.append(arg['default_init'])
 
-        if arg['type'] in DIRECT_CONSTRUCTION_CHECKED_CAST:
-            body.append(CHECKED_CAST[arg['type']].substitute(
-                env, arg_name=arg['name'], arg_pos=count,
-                null_okay=null_okay, default_init=default_init,
-                size=arg.get('size'),
-                result_name=arg['name'] + '_'))
-        else:
-            check_cast = CHECKED_CAST[arg['type']].substitute(
-                env, arg_name=arg['name'], arg_pos=count,
-                null_okay=null_okay, default_init=default_init,
-                size=arg.get('size'))
-            body.append("auto {}_ = {};".format(
-                arg['name'], check_cast))
+        check_cast = CHECKED_CAST[arg['type']].substitute(
+            env, arg_name=arg['name'], arg_pos=count,
+            null_okay=null_okay, default_init=default_init,
+            size=arg.get('size'))
+        body.append("auto {}_ = {};".format(
+            arg['name'], check_cast))
         if drop_argument(arg, option) or replace_with_null(arg):
             body.append(
                 "(void) {}_; //silence unused warning".format(arg['name']))