[vmap] symintify alias and squeeze (#107577)
The following tests now pass (both ops call into `alias` on certain paths):

```
PYTORCH_TEST_WITH_DYNAMO=1 pytest test/functorch/test_vmap.py -k test_squeeze -v
PYTORCH_TEST_WITH_DYNAMO=1 pytest test/functorch/test_vmap.py -k test_conj -v
```

NOTE: Ideally, the non-symint call sites would also go through this symint version, but that would require changes in multiple places, so I wanted to get a review of this fix first.

Other sites that use the `IntArrayRef` overload:
https://github.com/pytorch/pytorch/blob/5f56c4fb32dbb5dd4e75a3a3a9726ae95931926d/aten/src/ATen/native/TensorShape.cpp#L1707-L1713
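
As a rough sketch of the note above (not something this PR does): the existing `IntArrayRef` overload could, in principle, become a thin forwarder onto the SymInt template so there is only one implementation. The use of `c10::fromIntArrayRefSlow` and the explicit `c10::ArrayRef` template argument below are assumptions about how that could be spelled, not code from this commit.

```cpp
// Hypothetical unification sketch only; not part of this commit.
Tensor alias_with_sizes_and_strides(
    const Tensor& self,
    IntArrayRef sizes,
    IntArrayRef strides) {
  // c10::fromIntArrayRefSlow wraps an IntArrayRef as a SymIntArrayRef,
  // checking that each value is representable as a SymInt.
  return alias_with_sizes_and_strides<c10::ArrayRef>(
      self,
      c10::fromIntArrayRefSlow(sizes),
      c10::fromIntArrayRefSlow(strides));
}
```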

`view_impl` is called from `view` and `_unsafe_view`.
https://github.com/pytorch/pytorch/blob/5f56c4fb32dbb5dd4e75a3a3a9726ae95931926d/aten/src/ATen/native/TensorShape.cpp#L3295-L3306
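
For context, here is a minimal libtorch sketch (assumed for illustration, not taken from the PR's tests) of the two eager entry points that reach `alias_with_sizes_and_strides` after this change:

```cpp
#include <torch/torch.h>

int main() {
  auto x = torch::randn({2, 1, 3});

  // alias() now forwards sym_sizes()/sym_strides() to the helper.
  auto a = at::alias(x);
  TORCH_CHECK(a.data_ptr() == x.data_ptr());  // an alias shares storage

  // view() reaches the same helper through view_impl, which computes
  // the inferred sizes/strides as SymInts.
  auto v = x.view({2, 3});
  TORCH_CHECK(v.dim() == 2 && v.size(0) == 2 && v.size(1) == 3);
  return 0;
}
```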

Pull Request resolved: #107577
Approved by: https://github.com/zou3519
kshitij12345 authored and pytorchmergebot committed Aug 31, 2023
1 parent 138fafe commit 50fa588
Showing 2 changed files with 28 additions and 5 deletions.
aten/src/ATen/functorch/BatchRulesViews.cpp: 4 additions & 4 deletions

```diff
@@ -198,8 +198,8 @@ std::tuple<Tensor, optional<int64_t>> squeeze_batch_rule(const Tensor& self, opt
   // Manually calculate the output shape by eliding all dimensions of
   // size 1 keeping track of where the batch index started and where it
   // ended up moving to. We also ensure we do not drop the batch index.
-  auto shape = self.sizes();
-  DimVector squeezed_sizes;
+  auto shape = self.sym_sizes();
+  SymDimVector squeezed_sizes;
   bool before_batch_idx = true;
   int64_t new_batch_idx = 0;
   int64_t original_idx = 0;
@@ -219,7 +219,7 @@ std::tuple<Tensor, optional<int64_t>> squeeze_batch_rule(const Tensor& self, opt
     ++original_idx;
   }
 
-  auto result = self.view(squeezed_sizes);
+  auto result = self.view_symint(squeezed_sizes);
   return std::make_tuple(std::move(result), c10::optional<int64_t>(new_batch_idx));
 }
 
@@ -453,7 +453,7 @@ std::tuple<Tensor, optional<int64_t>> expand_batch_rule(
       "must be greater or equal to the number of dimensions in the tensor (", static_cast<uint64_t>(self_dim - 1), ")");
 
   auto self_ = moveBatchDimToFront(self, self_bdim);
-  auto self_sizes = self_.sizes();
+  auto self_sizes = self_.sym_sizes();
   auto batch_size = self_sizes[0];
 
   c10::SmallVector<c10::SymInt> size_(size.size() + 1);
```
aten/src/ATen/native/TensorShape.cpp: 24 additions & 1 deletion

```diff
@@ -1601,6 +1601,29 @@ Tensor alias_with_sizes_and_strides(
   return self_;
 }
 
+// specialization for symbolic shapes and strides.
+// SymIntArrayRef/ArrayRef<c10::SymInt> and SmallVector<c10::SymInt>/SymDimVector
+template <template <typename...> typename Container>
+Tensor alias_with_sizes_and_strides(
+    const Tensor& self,
+    const Container<c10::SymInt>& sizes,
+    const Container<c10::SymInt>& strides) {
+  //caller should make sure that sizes and strides are valid for self
+  //(storage is sufficient, strides are non-negative, strides and sizes array size is the same)
+  Tensor self_;
+  if (self.is_quantized()) {
+    self_ = at::detail::make_tensor<QTensorImpl>(
+      c10::TensorImpl::VIEW, Storage(self.storage()), self.key_set(), self.dtype(), get_qtensorimpl(self)->quantizer());
+    self_.unsafeGetTensorImpl()->set_sizes_and_strides(sizes, strides, self.sym_storage_offset());
+  } else {
+    self_ = at::detail::make_tensor<TensorImpl>(
+      c10::TensorImpl::VIEW, Storage(self.storage()), self.key_set(), self.dtype());
+    self_.unsafeGetTensorImpl()->set_sizes_and_strides(sizes, strides, self.sym_storage_offset());
+  }
+  namedinference::propagate_names(self_, self);
+  return self_;
+}
+
 Tensor reshape_symint(const Tensor& self, c10::SymIntArrayRef proposed_shape) {
   if (self.is_sparse()) {
     AT_ERROR("reshape is not implemented for sparse tensors");
@@ -3686,7 +3709,7 @@ Tensor view(const Tensor& self,
 }
 
 Tensor alias(const Tensor& self) {
-  return alias_with_sizes_and_strides(self, self.sizes(), self.strides());
+  return alias_with_sizes_and_strides(self, self.sym_sizes(), self.sym_strides());
 }
 
 Tensor detach(const Tensor& self) {
```
