diff --git a/aten/src/ATen/ScalarType.h b/aten/src/ATen/ScalarType.h
index 421fe8b11b959..4cb68a6370625 100644
--- a/aten/src/ATen/ScalarType.h
+++ b/aten/src/ATen/ScalarType.h
@@ -20,6 +20,15 @@
 _(at::Half,Half,d) \
 _(float,Float,d) \
 _(double,Double,d)
+#define AT_FORALL_SCALAR_TYPES_EXCEPT_HALF(_) \
+_(uint8_t,Byte,i) \
+_(int8_t,Char,i) \
+_(int16_t,Short,i) \
+_(int,Int,i) \
+_(int64_t,Long,i) \
+_(float,Float,d) \
+_(double,Double,d)
+
 enum class ScalarType {
 #define DEFINE_ENUM(_1,n,_2) \
   n,
diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp
index 1db5eaf3aac68..b9e292abe7452 100644
--- a/aten/src/ATen/native/TensorFactories.cpp
+++ b/aten/src/ATen/native/TensorFactories.cpp
@@ -12,6 +12,7 @@
 #include "ATen/NativeFunctions.h"
 #include "ATen/ScalarType.h"
 #include "ATen/Deprecated.h"
+#include "ATen/DeviceGuard.h"
 #include "TH/THRandom.h"
 #include <algorithm>
 
@@ -593,5 +594,21 @@ Tensor hann_window(
   return native::hamming_window(
       window_length, periodic, /*alpha=*/0.5, /*beta=*/0.5, options);
 }
+
+template <typename T>
+Tensor tensor(ArrayRef<T> values, const TensorOptions& options) {
+  auto result = at::empty(values.size(), options);
+  for (size_t i = 0; i < values.size(); ++i) {
+    result[i] = values[i];
+  }
+  return result;
+}
+
+#define TENSOR(T, _1, _2)                                           \
+  Tensor tensor(ArrayRef<T> values, const TensorOptions& options) { \
+    return tensor<T>(values, options);                              \
+  }
+AT_FORALL_SCALAR_TYPES_EXCEPT_HALF(TENSOR)
+#undef TENSOR
 } // namespace native
 } // namespace at
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 8405fb8d73f25..2474b29d57b4a 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -1573,10 +1573,10 @@
     SparseCPU: new_with_size_sparse
 
 - func: tensor(Type dtype) -> Tensor
-  variants: function
+  variants: []
 
 - func: tensor(Type dtype, IntList size) -> Tensor
-  variants: function
+  variants: []
 
 # NB: The function overloads are
removed to avoid a nasty bug where @@ -1598,10 +1598,10 @@ SparseCPU: new_with_tensor_and_size_sparse - func: sparse_coo_tensor(IndexTensor indices, Tensor values) -> Tensor - variants: function + variants: [] - func: sparse_coo_tensor(IndexTensor indices, Tensor values, IntList size) -> Tensor - variants: function + variants: [] - func: _native_sparse_coo_tensor_unsafe(IndexTensor indices, Tensor values, IntList size) -> Tensor diff --git a/aten/src/ATen/templates/Functions.h b/aten/src/ATen/templates/Functions.h index 5ed201d4b5e41..584316abf392c 100644 --- a/aten/src/ATen/templates/Functions.h +++ b/aten/src/ATen/templates/Functions.h @@ -15,6 +15,7 @@ namespace at { using native::from_blob; +using native::tensor; ${function_declarations} diff --git a/aten/src/ATen/templates/NativeFunctions.h b/aten/src/ATen/templates/NativeFunctions.h index e380c390299c4..2c84f212a7cc4 100644 --- a/aten/src/ATen/templates/NativeFunctions.h +++ b/aten/src/ATen/templates/NativeFunctions.h @@ -38,6 +38,28 @@ inline Tensor from_blob( return native::from_blob(data, sizes, [](void*) {}, options); } +// These functions are defined in native/TensorFactories.cpp. 
+#define TENSOR(T, S, _1)                                                  \
+  Tensor tensor(ArrayRef<T> values, const TensorOptions& options);        \
+  inline Tensor tensor(                                                   \
+      std::initializer_list<T> values, const TensorOptions& options) {    \
+    return native::tensor(ArrayRef<T>(values), options);                  \
+  }                                                                       \
+  inline Tensor tensor(T value, const TensorOptions& options) {           \
+    return native::tensor(ArrayRef<T>(value), options);                   \
+  }                                                                       \
+  inline Tensor tensor(ArrayRef<T> values) {                              \
+    return native::tensor(std::move(values), at::dtype(k##S));            \
+  }                                                                       \
+  inline Tensor tensor(std::initializer_list<T> values) {                 \
+    return native::tensor(ArrayRef<T>(values));                           \
+  }                                                                       \
+  inline Tensor tensor(T value) {                                         \
+    return native::tensor(ArrayRef<T>(value));                            \
+  }
+AT_FORALL_SCALAR_TYPES_EXCEPT_HALF(TENSOR)
+#undef TENSOR
+
 ${native_function_declarations}
 
 } // namespace native
diff --git a/aten/src/TH/THBlasUtils.h b/aten/src/TH/THBlasUtils.h
index 37aeeb419867f..8281047f24402 100644
--- a/aten/src/TH/THBlasUtils.h
+++ b/aten/src/TH/THBlasUtils.h
@@ -6,15 +6,6 @@
 // rather than by name directly. Someone should figure out a reasonable way to
 // rewrite these in more idiomatic ATen and move it into ATen proper.
-#define AT_FORALL_SCALAR_TYPES_EXCEPT_HALF(_) \
-_(uint8_t,Byte,i) \
-_(int8_t,Char,i) \
-_(int16_t,Short,i) \
-_(int,Int,i) \
-_(int64_t,Long,i) \
-_(float,Float,d) \
-_(double,Double,d)
-
 template <typename T>
 inline void THBlas_axpy(int64_t n, T a, T *x, int64_t incx, T *y, int64_t incy);
diff --git a/test/cpp/api/tensor.cpp b/test/cpp/api/tensor.cpp
index 00e555f751800..6388f8e3b6182 100644
--- a/test/cpp/api/tensor.cpp
+++ b/test/cpp/api/tensor.cpp
@@ -4,6 +4,18 @@
 #include <torch/tensor.h>
 
+#include <cmath>
+
+template <typename T>
+bool exactly_equal(at::Tensor left, T right) {
+  return at::Scalar(left).to<T>() == right;
+}
+
+template <typename T>
+bool almost_equal(at::Tensor left, T right, T tolerance = 1e-4) {
+  return std::abs(at::Scalar(left).to<T>() - right) < tolerance;
+}
+
 #define REQUIRE_TENSOR_OPTIONS(device_, index_, type_, layout_) \
   REQUIRE(tensor.device().type() == at::Device((device_), (index_)).type()); \
   REQUIRE(tensor.device().index() == at::Device((device_), (index_)).index()); \
@@ -83,3 +95,69 @@ TEST_CASE("Tensor/ToDoesNotCopyWhenOptionsAreAllTheSame") {
   auto hopefully_not_copy = tensor.to(at::kFloat);
   REQUIRE(hopefully_not_copy.data() == tensor.data());
 }
+
+TEST_CASE("Tensor/ContainsCorrectValueForSingleValue") {
+  auto tensor = at::tensor(123);
+  REQUIRE(tensor.numel() == 1);
+  REQUIRE(tensor.dtype() == at::kInt);
+  REQUIRE(tensor[0].toCInt() == 123);
+
+  tensor = at::tensor(123.456f);
+  REQUIRE(tensor.numel() == 1);
+  REQUIRE(tensor.dtype() == at::kFloat);
+  REQUIRE(almost_equal(tensor[0], 123.456f));
+
+  tensor = at::tensor(123.456);
+  REQUIRE(tensor.numel() == 1);
+  REQUIRE(tensor.dtype() == at::kDouble);
+  REQUIRE(almost_equal(tensor[0], 123.456));
+}
+
+TEST_CASE("Tensor/ContainsCorrectValuesForManyValues") {
+  auto tensor = at::tensor({1, 2, 3});
+  REQUIRE(tensor.numel() == 3);
+  REQUIRE(tensor.dtype() == at::kInt);
+  REQUIRE(exactly_equal(tensor[0], 1));
+  REQUIRE(exactly_equal(tensor[1], 2));
+  REQUIRE(exactly_equal(tensor[2], 3));
+
+  tensor = at::tensor({1.5, 2.25, 3.125});
+  REQUIRE(tensor.numel() == 3);
+  REQUIRE(tensor.dtype() == at::kDouble);
+  REQUIRE(almost_equal(tensor[0], 1.5));
+  REQUIRE(almost_equal(tensor[1], 2.25));
+  REQUIRE(almost_equal(tensor[2], 3.125));
+}
+
+TEST_CASE("Tensor/ContainsCorrectValuesWhenConstructedFromVector") {
+  std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+  auto tensor = at::tensor(v);
+  REQUIRE(tensor.numel() == v.size());
+  REQUIRE(tensor.dtype() == at::kInt);
+  for (size_t i = 0; i < v.size(); ++i) {
+    REQUIRE(exactly_equal(tensor[i], v.at(i)));
+  }
+
+  std::vector<float> w = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0};
+  tensor = at::tensor(w);
+  REQUIRE(tensor.numel() == w.size());
+  REQUIRE(tensor.dtype() == at::kFloat);
+  for (size_t i = 0; i < w.size(); ++i) {
+    REQUIRE(almost_equal(tensor[i], w.at(i)));
+  }
+}
+
+TEST_CASE("Tensor/UsesOptionsThatAreSupplied") {
+  auto tensor = at::tensor(123, dtype(at::kFloat)) + 0.5;
+  REQUIRE(tensor.numel() == 1);
+  REQUIRE(tensor.dtype() == at::kFloat);
+  REQUIRE(almost_equal(tensor[0], 123.5));
+
+  tensor = at::tensor({1.1, 2.2, 3.3}, dtype(at::kInt));
+  REQUIRE(tensor.numel() == 3);
+  REQUIRE(tensor.dtype() == at::kInt);
+  REQUIRE(tensor.layout() == at::kStrided);
+  REQUIRE(exactly_equal(tensor[0], 1));
+  REQUIRE(exactly_equal(tensor[1], 2));
+  REQUIRE(exactly_equal(tensor[2], 3));
+}
diff --git a/test/cpp/api/tensor_cuda.cpp b/test/cpp/api/tensor_cuda.cpp
new file mode 100644
index 0000000000000..eefeab6ca19c2
--- /dev/null
+++ b/test/cpp/api/tensor_cuda.cpp
@@ -0,0 +1,10 @@
+#include <catch.hpp>
+
+#include <ATen/ATen.h>
+
+#include <cuda.h>
+
+TEST_CASE("Tensor/AllocatesTensorOnTheCorrectDevice", "[cuda]") {
+  auto tensor = at::tensor({1, 2, 3}, at::device({at::kCUDA, 1}));
+  REQUIRE(tensor.device() == at::Device(at::kCUDA, 1));
+}
diff --git a/torch/CMakeLists.txt b/torch/CMakeLists.txt
index 8a42d87241536..0c5bcd3a2ca86 100644
--- a/torch/CMakeLists.txt
+++ b/torch/CMakeLists.txt
@@ -362,6 +362,7 @@ if (TORCH_BUILD_TEST)
     ${TORCH_API_TEST_DIR}/serialization.cpp
${TORCH_API_TEST_DIR}/static.cpp ${TORCH_API_TEST_DIR}/tensor.cpp + ${TORCH_API_TEST_DIR}/tensor_cuda.cpp # Temporary until ATen tests are built with Caffe2 ${TORCH_API_TEST_DIR}/tensor_options.cpp ${TORCH_API_TEST_DIR}/tensor_options_cuda.cpp