Change some comments related to moving copy_ to native #19618

Closed
wants to merge 2 commits
4 changes: 2 additions & 2 deletions aten/src/ATen/SparseTensorUtils.h
@@ -33,8 +33,8 @@ inline void alias_into_sparse(const SparseTensor& self, const LongTensor& indices
 inline void copy_into_sparse(const SparseTensor& self, const LongTensor& indices, const Tensor& values, bool non_blocking) {
   alias_into_sparse(
       self,
-      indices.to(self._indices().options(), non_blocking, /*copy*/true),
-      values.to(self._values().options(), non_blocking, /*copy*/true));
+      indices.to(self._indices().options(), non_blocking, /*copy=*/true),
+      values.to(self._values().options(), non_blocking, /*copy=*/true));
 }

 // TODO: put this into the public API
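
The only substantive change in each hunk of this PR is `/*copy*/` becoming `/*copy=*/`. The trailing `=` follows the argument-name comment convention that tooling such as clang-tidy's bugprone-argument-comment check can match against the parameter name in the callee's declaration. A minimal sketch of the convention (hypothetical function, not from this PR):

void resize_image(int width, int height, bool keep_aspect);

// The /*name=*/ spelling lets tooling verify the comment against the
// declared parameter; a mismatch such as /*w=*/640 would be flagged.
resize_image(/*width=*/640, /*height=*/480, /*keep_aspect=*/true);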
4 changes: 2 additions & 2 deletions aten/src/ATen/core/DeprecatedTypeProperties.cpp
@@ -15,9 +15,9 @@ Storage DeprecatedTypeProperties::unsafeStorageFromTH(void * th_pointer, bool retain)

 Tensor DeprecatedTypeProperties::copy(const Tensor & src, bool non_blocking, c10::optional<Device> to_device) const {
   if (to_device) {
-    return src.to(src.options().dtype(scalarType()).device(to_device), non_blocking, /*copy*/true);
+    return src.to(src.options().dtype(scalarType()).device(to_device), non_blocking, /*copy=*/true);
   }
-  return src.to(src.options().dtype(scalarType()), non_blocking, /*copy*/true);
+  return src.to(src.options().dtype(scalarType()), non_blocking, /*copy=*/true);
 }

 std::unique_ptr<Generator> DeprecatedTypeProperties::generator() const {
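
A minimal usage sketch (not from the PR) of the Tensor::to(TensorOptions, bool non_blocking, bool copy) overload that DeprecatedTypeProperties::copy forwards to; passing copy=true forces fresh storage even when dtype and device already match:

#include <ATen/ATen.h>

int main() {
  at::Tensor src = at::ones({2, 3});
  // copy=true forces a new allocation even though the options already match.
  at::Tensor dst = src.to(src.options(), /*non_blocking=*/false, /*copy=*/true);
  AT_ASSERT(dst.data_ptr() != src.data_ptr());
  return 0;
}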
2 changes: 1 addition & 1 deletion aten/src/ATen/core/TensorMethods.h
@@ -16,7 +16,7 @@ inline Tensor Tensor::toType(const DeprecatedTypeProperties & t, bool non_blocking) const {
   return to(
       at::device(t.device_type()).layout(t.layout()).dtype(t.scalarType()),
       non_blocking,
-      /*copy*/ true);
+      /*copy=*/ true);
 }

 inline Tensor Tensor::cpu() const {
2 changes: 1 addition & 1 deletion aten/src/ATen/templates/TensorMethods.h
@@ -16,7 +16,7 @@ inline Tensor Tensor::toType(const DeprecatedTypeProperties & t, bool non_blocking) const {
   return to(
       at::device(t.device_type()).layout(t.layout()).dtype(t.scalarType()),
       non_blocking,
-      /*copy*/ true);
+      /*copy=*/ true);
 }

 inline Tensor Tensor::cpu() const {
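
This file is the template from which aten/src/ATen/core/TensorMethods.h is generated, which is why this hunk is identical to the previous one. A usage sketch of the toType overload touched here, assuming at::CPU(...) still returns the DeprecatedTypeProperties& the method expects:

at::Tensor t = at::zeros({2, 2});
// toType forwards to Tensor::to(..., non_blocking, /*copy=*/true),
// so the result is a copy even when no conversion is actually needed.
at::Tensor d = t.toType(at::CPU(at::kDouble), /*non_blocking=*/false);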
4 changes: 2 additions & 2 deletions aten/src/ATen/test/scalar_test.cpp
@@ -90,8 +90,8 @@ TEST(TestScalar, TestScalar) {
   test_overflow();

   if (at::hasCUDA()) {
-    auto r = next_h.to(at::Device(kCUDA), kFloat, /*non_blocking*/ false, /*copy*/ true);
-    ASSERT_TRUE(r.to(at::Device(kCPU), kFloat, /*non_blocking*/ false, /*copy*/ true).equal(next_h));
+    auto r = next_h.to(at::Device(kCUDA), kFloat, /*non_blocking=*/ false, /*copy=*/ true);
+    ASSERT_TRUE(r.to(at::Device(kCPU), kFloat, /*non_blocking=*/ false, /*copy=*/ true).equal(next_h));
   }
   ASSERT_NO_THROW(randn({10, 10, 2}, options));

1 change: 1 addition & 0 deletions torch/csrc/autograd/VariableTypeManual.cpp
@@ -222,6 +222,7 @@ void VariableType::set_data(Tensor & self, Tensor new_data) const {
   as_variable_ref(self).set_data(new_data);
 }

+// We don't have an outplace copy, so this can't be generated automatically
 Tensor & VariableType::copy_(Tensor & self, const Tensor & src, bool non_blocking) const {
   jit::Value* output = nullptr;
   if(torch::jit::tracer::isTracing()) {
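
Background for the new comment: most in-place ops have an out-of-place counterpart (add_ vs. add) from which the autograd codegen can derive a VariableType wrapper, but copy_ has no out-of-place "copy", so its wrapper is written by hand. The asymmetry at the call site, as a sketch:

at::Tensor a = at::zeros({2});
at::Tensor b = at::ones({2});
at::Tensor c = a.add(b);  // out-of-place counterpart exists for add_
a.copy_(b);               // in-place only; there is no a.copy(b)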
4 changes: 2 additions & 2 deletions torch/csrc/autograd/functions/tensor.cpp
@@ -30,8 +30,8 @@ auto CopyBackwards::apply(variable_list&& grads) -> variable_list {
     grad_inputs[1] = grad.to(
         src_type->device_type(),
         src_type->scalarType(),
-        /*non_blocking*/false,
-        /*copy*/true);
+        /*non_blocking=*/false,
+        /*copy=*/true);
   } else {
     grad_inputs[1] = grad.toType(*src_type);
   }
4 changes: 2 additions & 2 deletions torch/csrc/cuda/comm.cpp
@@ -73,8 +73,8 @@ std::vector<Tensor> broadcast(const Tensor& tensor, IntArrayRef devices) {
       tensors.push_back(tensor.to(
           at::Device(kCUDA, device),
           tensor.scalar_type(),
-          /*non_blocking*/true,
-          /*copy*/true));
+          /*non_blocking=*/true,
+          /*copy=*/true));
     }
   }
   return tensors;
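
A note on /*non_blocking=*/true in the hunk above: the host-to-device copy may return before the transfer completes, so a caller that consumes the result must synchronize first. A minimal sketch, assuming a CUDA build and the stream API of this era:

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>

void async_copy_sketch() {
  // Pinned host memory is what makes the H2D copy truly asynchronous.
  at::Tensor host = at::ones({1024}).pin_memory();
  at::Tensor dev = host.to(at::Device(at::kCUDA, 0), host.scalar_type(),
                           /*non_blocking=*/true, /*copy=*/true);
  at::cuda::getCurrentCUDAStream().synchronize();  // wait before using the result
}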