Skip to content

Commit

Permalink
Avoid direct reference to at::native::tensor from TensorDataContainer (
Browse files Browse the repository at this point in the history
…#47567)

Summary: Pull Request resolved: #47567

Test Plan: Imported from OSS

Reviewed By: ezyang

Differential Revision: D24822517

Pulled By: iseeyuan

fbshipit-source-id: f69bfc029aae5199dbc63193fc7a5e5e6feb5790
  • Loading branch information
iseeyuan authored and facebook-github-bot committed Nov 18, 2020
1 parent c6c6a53 commit 4883d39
Show file tree
Hide file tree
Showing 6 changed files with 103 additions and 58 deletions.
80 changes: 73 additions & 7 deletions aten/src/ATen/Utils.cpp
@@ -1,10 +1,12 @@
#include <ATen/Utils.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/Functions.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <stdarg.h>
#include <cstdlib>
#include <stdexcept>
#include <typeinfo>
#include <cstdlib>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/Context.h>

namespace at {

Expand All @@ -15,9 +17,16 @@ int _crash_if_asan(int arg) {
}

namespace detail {
// empty_cpu is used in ScalarOps.h, which can be referenced by other ATen files. Since we want to decouple direct referencing native symbols and only access native symbols through dispatching, we move its implementation here.
Tensor empty_cpu(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
// empty_cpu is used in ScalarOps.h, which can be referenced by other ATen
// files. Since we want to decouple direct referencing native symbols and only
// access native symbols through dispatching, we move its implementation here.
Tensor empty_cpu(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt) {
Device device = device_or_default(device_opt);

TORCH_CHECK(device.type() == DeviceType::CPU);
Expand Down Expand Up @@ -53,6 +62,63 @@ Tensor empty_cpu(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::opt

return tensor;
}

// Materialize `values` into a freshly allocated, contiguous CPU tensor.
// The element copy runs under a dtype dispatch so `scalar_t` matches the
// tensor's runtime scalar type.
template <typename T>
Tensor tensor_cpu(ArrayRef<T> values, const TensorOptions& options) {
  Tensor out = at::empty(values.size(), options);
  AT_ASSERT(out.is_contiguous());
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX(out.scalar_type(), "tensor_cpu", [&] {
    scalar_t* dst = out.template data_ptr<scalar_t>();
    std::copy(values.begin(), values.end(), dst);
  });
  return out;
}

// Construct the tensor on CPU first, then move it to the device requested
// in `options` (e.g. CUDA). Used for non-CPU device types.
template <typename T>
Tensor tensor_backend(ArrayRef<T> values, const TensorOptions& options) {
  const auto staging = tensor_cpu(values, options.device(DeviceType::CPU));
  return staging.to(options.device());
}

// Complex-dtype counterpart of tensor_cpu: allocate a contiguous CPU tensor
// and copy `values` into it, dispatching over complex scalar types only.
template <typename T>
Tensor tensor_complex_cpu(ArrayRef<T> values, const TensorOptions& options) {
  Tensor out = at::empty(values.size(), options);
  AT_ASSERT(out.is_contiguous());
  AT_DISPATCH_COMPLEX_TYPES(out.scalar_type(), "tensor_cpu", [&] {
    scalar_t* dst = out.template data_ptr<scalar_t>();
    std::copy(values.begin(), values.end(), dst);
  });
  return out;
}

// Complex-dtype counterpart of tensor_backend: build on CPU, then transfer
// the result to the device requested in `options`.
template <typename T>
Tensor tensor_complex_backend(
    ArrayRef<T> values,
    const TensorOptions& options) {
  const auto staging =
      tensor_complex_cpu(values, options.device(DeviceType::CPU));
  return staging.to(options.device());
}
} // namespace detail

} // at
#define TENSOR(T, _1) \
Tensor tensor(ArrayRef<T> values, const TensorOptions& options) { \
if (options.device().type() != c10::DeviceType::CPU) { \
return at::detail::tensor_backend(values, options); \
} else { \
return at::detail::tensor_cpu(values, options); \
} \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
#undef TENSOR

#define TENSOR(T, _1) \
Tensor tensor(ArrayRef<T> values, const TensorOptions& options) { \
if (options.device().type() != c10::DeviceType::CPU) { \
return at::detail::tensor_complex_backend(values, options); \
} else { \
return at::detail::tensor_complex_cpu(values, options); \
} \
}
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR
} // namespace at
17 changes: 17 additions & 0 deletions aten/src/ATen/Utils.h
Expand Up @@ -138,6 +138,23 @@ namespace detail {
CAFFE2_API
Tensor empty_cpu(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt);

template <typename T>
CAFFE2_API
Tensor tensor_cpu(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
CAFFE2_API
Tensor tensor_backend(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
CAFFE2_API
Tensor tensor_complex_cpu(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
CAFFE2_API
Tensor tensor_complex_backend(ArrayRef<T> values, const TensorOptions& options);
} // namespace detail


} // at
42 changes: 4 additions & 38 deletions aten/src/ATen/native/TensorFactories.cpp
Expand Up @@ -1046,58 +1046,24 @@ Tensor vander(const Tensor& x, c10::optional<int64_t> N, bool increasing) {

template <typename T>
Tensor tensor_cpu(ArrayRef<T> values, const TensorOptions& options) {
auto result = at::empty(values.size(), options);
AT_ASSERT(result.is_contiguous());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX(result.scalar_type(), "tensor_cpu", [&] {
std::copy(values.begin(), values.end(), result.template data_ptr<scalar_t>());
});
return result;
return at::detail::tensor_cpu(values, options);
}

template <typename T>
Tensor tensor_backend(ArrayRef<T> values, const TensorOptions& options) {
auto cpu_tensor = tensor_cpu(values, options.device(DeviceType::CPU));
return cpu_tensor.to(options.device());
return at::detail::tensor_backend(values, options);
}

template <typename T>
Tensor tensor_complex_cpu(ArrayRef<T> values, const TensorOptions& options) {
auto result = at::empty(values.size(), options);
AT_ASSERT(result.is_contiguous());
AT_DISPATCH_COMPLEX_TYPES(result.scalar_type(), "tensor_cpu", [&] {
std::copy(values.begin(), values.end(), result.template data_ptr<scalar_t>());
});
return result;
return at::detail::tensor_complex_cpu(values, options);
}

template <typename T>
Tensor tensor_complex_backend(ArrayRef<T> values, const TensorOptions& options) {
auto cpu_tensor = tensor_complex_cpu(values, options.device(DeviceType::CPU));
return cpu_tensor.to(options.device());
return at::detail::tensor_complex_backend(values, options);
}

#define TENSOR(T, _1) \
Tensor tensor(ArrayRef<T> values, const TensorOptions& options) { \
if (options.device().type() != c10::DeviceType::CPU) { \
return tensor_backend(values, options); \
} else { \
return tensor_cpu(values, options); \
} \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
#undef TENSOR

#define TENSOR(T, _1) \
Tensor tensor(ArrayRef<T> values, const TensorOptions& options) { \
if (options.device().type() != c10::DeviceType::CPU) { \
return tensor_complex_backend(values, options); \
} else { \
return tensor_complex_cpu(values, options); \
} \
}
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR

Tensor from_file(std::string filename, c10::optional<bool> shared, c10::optional<int64_t> size, const TensorOptions& options) {
TORCH_CHECK(!options.pinned_memory(), "tensors constructed from a file cannot be pinned");
int64_t my_size = size.value_or(0);
Expand Down
2 changes: 0 additions & 2 deletions aten/src/ATen/templates/Functions.cpp
Expand Up @@ -7,8 +7,6 @@

namespace at {

using native::tensor;

${function_definitions}

}
2 changes: 0 additions & 2 deletions aten/src/ATen/templates/Functions.h
Expand Up @@ -19,8 +19,6 @@

namespace at {

using native::tensor;

${function_declarations}

// Special C++ only overloads for std()-like functions (See gh-40287)
Expand Down
18 changes: 9 additions & 9 deletions aten/src/ATen/templates/NativeFunctions.h
Expand Up @@ -3,9 +3,9 @@
// ${generated_comment}

#include <ATen/Context.h>
#include <ATen/core/Reduction.h>
#include <c10/core/ScalarType.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>

#include <array>
#include <functional>
Expand All @@ -23,31 +23,31 @@ struct Type;
} // namespace at

namespace at {
namespace native {

// These functions are defined in native/TensorFactories.cpp.
// These functions are defined in ATen/Utils.cpp.
#define TENSOR(T, S) \
CAFFE2_API Tensor tensor(ArrayRef<T> values, const TensorOptions& options); \
inline Tensor tensor( \
std::initializer_list<T> values, const TensorOptions& options) { \
return native::tensor(ArrayRef<T>(values), options); \
return at::tensor(ArrayRef<T>(values), options); \
} \
inline Tensor tensor(T value, const TensorOptions& options) { \
return native::tensor(ArrayRef<T>(value), options); \
return at::tensor(ArrayRef<T>(value), options); \
} \
inline Tensor tensor(ArrayRef<T> values) { \
return native::tensor(std::move(values), at::dtype(k##S)); \
return at::tensor(std::move(values), at::dtype(k##S)); \
} \
inline Tensor tensor(std::initializer_list<T> values) { \
return native::tensor(ArrayRef<T>(values)); \
return at::tensor(ArrayRef<T>(values)); \
} \
inline Tensor tensor(T value) { \
return native::tensor(ArrayRef<T>(value)); \
return at::tensor(ArrayRef<T>(value)); \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR

namespace native {

${native_function_declarations}

} // namespace native
Expand Down

0 comments on commit 4883d39

Please sign in to comment.