-
Notifications
You must be signed in to change notification settings - Fork 21.4k
/
Functions.h
126 lines (111 loc) · 3.98 KB
/
Functions.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
#pragma once
// ${generated_comment}
#include <c10/core/Scalar.h>
#include <ATen/Tensor.h>
#include <c10/core/Storage.h>
#include <ATen/core/Generator.h>
#include <c10/util/Deprecated.h>
#include <ATen/NativeFunctions.h> // TODO: try to delete this
#include <ATen/DeviceGuard.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>
#include <c10/util/Optional.h>
#include <ATen/TensorUtils.h>
#include <ATen/Context.h>
#include <ATen/TracerMode.h>
#include <ATen/core/op_registration/hacky_wrapper_for_legacy_signatures.h>
namespace at {
${function_declarations}
// Special C++ only overloads for std()-like functions (See gh-40287)
// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
// So, for example std(0) would select the std(unbiased=False) overload
// C++-only disambiguating overload (gh-40287): makes var(self, 0) pick the
// IntArrayRef path rather than converting the int to the bool `unbiased`.
inline Tensor var(const Tensor& self, int dim) {
  const int64_t dim_arr[1] = {dim};
  return at::native::var(self, IntArrayRef(dim_arr, 1));
}
// C++-only disambiguating overload (gh-40287): routes a single int dim to
// the IntArrayRef overload instead of the bool `unbiased` one.
inline std::tuple<Tensor,Tensor> var_mean(const Tensor& self, int dim) {
  const int64_t dim_arr[1] = {dim};
  return at::native::var_mean(self, IntArrayRef(dim_arr, 1));
}
// C++-only disambiguating overload (gh-40287): makes std(self, 0) pick the
// IntArrayRef path rather than converting the int to the bool `unbiased`.
inline Tensor std(const Tensor& self, int dim) {
  const int64_t dim_arr[1] = {dim};
  return at::native::std(self, IntArrayRef(dim_arr, 1));
}
// C++-only disambiguating overload (gh-40287): routes a single int dim to
// the IntArrayRef overload instead of the bool `unbiased` one.
inline std::tuple<Tensor,Tensor> std_mean(const Tensor& self, int dim) {
  const int64_t dim_arr[1] = {dim};
  return at::native::std_mean(self, IntArrayRef(dim_arr, 1));
}
namespace {
// Returns an all-zero size vector whose rank matches the memory format
// requested in `options`: 4-d for ChannelsLast, 5-d for ChannelsLast3d,
// and a plain 1-d {0} otherwise. Used to seed the empty tensor that
// from_blob() re-points at external storage.
inline std::vector<int64_t> zero_sizes(const TensorOptions& options) {
  const auto mf_opt = options.memory_format_opt();
  if (mf_opt.has_value()) {
    switch (*mf_opt) {
      case at::MemoryFormat::ChannelsLast:
        return std::vector<int64_t>(4, 0);
      case at::MemoryFormat::ChannelsLast3d:
        return std::vector<int64_t>(5, 0);
      default:
        break;
    }
  }
  return {0};
}
}
// Wraps caller-owned memory in a Tensor without copying. The returned
// tensor aliases `data`; `deleter` is captured in the storage's DataPtr
// (via InefficientStdFunctionContext) and is invoked with `data` when the
// storage is released.
//
// `target_device`, when provided, overrides device inference; otherwise
// the device is deduced from the pointer itself. If `options` pins a
// specific device index, it must agree with that device (checked below).
inline Tensor from_blob(
void* data,
IntArrayRef sizes,
IntArrayRef strides,
const std::function<void(void*)>& deleter,
const TensorOptions& options = {},
const c10::optional<Device> target_device = c10::nullopt) {
AutoNonVariableTypeMode guard; // TODO: remove
tracer::impl::NoTracerDispatchMode tracer_guard;
// Prefer the explicit target device; otherwise ask the context to deduce
// the device from the raw pointer and the device *type* in `options`.
auto device = (target_device.has_value()?
target_device.value() : globalContext().getDeviceFromPtr(data, options.device().type()));
// Only enforce an exact device match when the caller pinned an index;
// a type-only device (e.g. plain "cuda") is allowed to resolve freely.
if (options.device().has_index()) {
TORCH_CHECK(
options.device() == device,
"Specified device ", options.device(),
" does not match device of data ", device);
}
// Non-resizable storage with no allocator: the tensor can never grow or
// reallocate the external buffer it aliases.
auto storage = Storage(
Storage::use_byte_size_t(),
detail::computeStorageNbytes(sizes, strides, options.dtype().itemsize()),
InefficientStdFunctionContext::makeDataPtr(data, deleter, device),
/*allocator=*/nullptr,
/*resizable=*/false);
// Build an empty tensor (rank chosen per memory format by zero_sizes)
// and re-point it at the external storage with the requested geometry.
return empty(IntArrayRef(zero_sizes(options)), options).set_(storage, 0, sizes, strides);
}
// from_blob overload without explicit strides: treats the buffer as
// contiguous and derives row-major strides from `sizes` before forwarding
// to the full overload.
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {}) {
  const auto contiguous_strides = detail::defaultStrides(sizes);
  return from_blob(data, sizes, contiguous_strides, deleter, options);
}
// Wraps caller-owned memory in a Tensor without copying, taking no
// ownership at all: the DataPtr's deleter is a no-op lambda, so the caller
// must keep `data` alive for the lifetime of the returned tensor (and any
// views of it).
inline Tensor from_blob(
void* data,
IntArrayRef sizes,
IntArrayRef strides,
const TensorOptions& options = {}) {
AutoNonVariableTypeMode guard; // TODO: remove
tracer::impl::NoTracerDispatchMode tracer_guard;
// No target_device parameter here: the device is always deduced from the
// pointer and the device *type* in `options`.
auto device = globalContext().getDeviceFromPtr(data, options.device().type());
// Only enforce an exact device match when the caller pinned an index.
if (options.device().has_index()) {
TORCH_CHECK(
options.device() == device,
"Specified device ", options.device(),
" does not match device of data ", device);
}
// Non-resizable storage, no allocator, no-op deleter: pure borrow of the
// external buffer.
auto storage = Storage(
Storage::use_byte_size_t(),
detail::computeStorageNbytes(sizes, strides, options.dtype().itemsize()),
DataPtr(data, nullptr, [](void*) {}, device),
/*allocator=*/nullptr,
/*resizable=*/false);
// Build an empty tensor (rank chosen per memory format by zero_sizes)
// and re-point it at the external storage with the requested geometry.
return empty(IntArrayRef(zero_sizes(options)), options).set_(storage, 0, sizes, strides);
}
// Non-owning from_blob overload without explicit strides: treats the
// buffer as contiguous, deriving row-major strides from `sizes` before
// forwarding to the strided overload.
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    const TensorOptions& options = {}) {
  const auto contiguous_strides = detail::defaultStrides(sizes);
  return from_blob(data, sizes, contiguous_strides, options);
}
// Free-function convenience form of Tensor::numel().
inline int64_t numel(const Tensor& tensor) {
  const int64_t element_count = tensor.numel();
  return element_count;
}
}