Skip to content

Commit

Permalink
Revert D25544731: [PyTorch] Avoid extra Tensor refcounting in _cat_out_cpu
Browse files Browse the repository at this point in the history

Test Plan: revert-hammer

Differential Revision:
D25544731 (1a05104)

Original commit changeset: 7b9656d0371a

fbshipit-source-id: 0f7ea74eca282cadf269bbd284d59650a431ed65
  • Loading branch information
Mike Ruberry authored and facebook-github-bot committed Dec 18, 2020
1 parent 385f6b4 commit 625bc40
Showing 1 changed file with 12 additions and 17 deletions.
29 changes: 12 additions & 17 deletions aten/src/ATen/native/TensorShape.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -101,10 +101,6 @@ static inline void check_cat_shape_except_dim(const Tensor & first, const Tensor
}
}

static bool should_skip(const Tensor& t) {
return t.numel() == 0 && t.dim() == 1;
}

Tensor & _cat_out_cpu(Tensor& result, TensorList tensors, int64_t dim) {
// previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible
// to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors
Expand All @@ -113,6 +109,7 @@ Tensor & _cat_out_cpu(Tensor& result, TensorList tensors, int64_t dim) {
// FIXME: warn if this is the case
bool allSkipped = true;
bool allContiguous = true;
Tensor notSkippedTensor;

// Inputs cannot alias the output tensor
for (int64_t i = 0; i < tensors.size(); i++) {
Expand All @@ -124,21 +121,19 @@ Tensor & _cat_out_cpu(Tensor& result, TensorList tensors, int64_t dim) {
}
at::assert_no_internal_overlap(result);

const Tensor* pnotSkippedTensor = [](TensorList tensors) -> const Tensor* {
for (auto const &tensor : tensors) {
if (should_skip(tensor)) {
continue;
}
// we've found a non-empty tensor
return &tensor;
auto should_skip = [](const Tensor& t) { return t.numel() == 0 && t.dim() == 1; };
for (auto const &tensor : tensors) {
if (should_skip(tensor)) {
continue;
}
return nullptr;
}(tensors);

if (!pnotSkippedTensor) {
// we've found a non-empty tensor
allSkipped = false;
notSkippedTensor = tensor;
break;
}
if (allSkipped) {
return result;
}
const Tensor& notSkippedTensor = *pnotSkippedTensor;

TORCH_CHECK(tensors.size() > 0, "expected a non-empty list of Tensors");
TORCH_CHECK(dim <= notSkippedTensor.dim(), "dimension ", dim, "out of range");
Expand Down Expand Up @@ -196,7 +191,7 @@ Tensor & _cat_out_cpu(Tensor& result, TensorList tensors, int64_t dim) {
if (reuse_iterator &&
result.is_contiguous(first_tensor_mem_format) &&
no_type_promotion) {
const auto& source_slice = notSkippedTensor;
auto source_slice = notSkippedTensor;
auto slice_dim_size = source_slice.size(dim);
auto result_slice = result.narrow(dim, 0, slice_dim_size);
auto result_slice_data = result_slice.data_ptr();
Expand Down

0 comments on commit 625bc40

Please sign in to comment.