Skip to content

Commit

Permalink
Added support for expand in LazyTensor shape inference (#77830)
Browse files Browse the repository at this point in the history

Summary:
Added support for `expand` in LazyTensor shape inference
Fixes #77831

 ---

**Blockers:**

- [x] #77880
- [x] #77882

Pull Request resolved: #77830
Approved by: https://github.com/Krovatkin

Test Plan: contbuild & OSS CI, see https://hud.pytorch.org/commit/pytorch/pytorch/0922cc024eeafa2158c0d00396494a0ae983f8cb

Reviewed By: b0noI

Differential Revision: D37523035

fbshipit-source-id: 2e88e9a8a85c0a9e504fec92925cec0a05588892
  • Loading branch information
miladm authored and facebook-github-bot committed Jun 29, 2022
1 parent 9bbb908 commit e69617a
Show file tree
Hide file tree
Showing 2 changed files with 61 additions and 0 deletions.
55 changes: 55 additions & 0 deletions torch/csrc/lazy/core/shape_inference.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,8 @@
#include <torch/csrc/api/include/torch/enum.h>
#include <torch/csrc/lazy/core/ops/utils.h>
#include <torch/csrc/lazy/core/shape.h>
#include <torch/csrc/lazy/core/util.h>
#include <torch/csrc/lazy/ts_backend/dynamic_ir.h>
#include <ostream>
#include <vector>

Expand Down Expand Up @@ -420,6 +422,59 @@ std::vector<Shape> compute_shape_embedding_dense_backward(
Shape(grad_output.scalar_type(), {num_weights, grad_output.size(-1)})};
}

// Infers the output shape of aten::expand for concrete (non-symbolic) sizes.
//
// Mirrors ATen's expand semantics: `size` may have more dimensions than
// `self` (new leading dimensions are prepended), and a `-1` entry means
// "keep the corresponding dimension of self". For a prepended dimension,
// the corresponding padded_self entry is 0.
//
// Args:
//   self: the tensor being expanded.
//   size: the requested output sizes (may contain -1).
//   implicit: unused here; part of the aten::expand signature.
// Returns: a single-element vector holding the inferred output Shape.
std::vector<Shape> compute_shape_expand(
    const at::Tensor& self,
    at::IntArrayRef size,
    bool implicit) {
  // Cast to a signed type before comparing with self.dim() (int64_t) to
  // avoid a signed/unsigned comparison.
  CHECK_GE(static_cast<int64_t>(size.size()), self.dim());
  int64_t num_new_dimensions = size.size() - self.dim();
  // Left-pad self's sizes with zeros so it lines up index-for-index with
  // the requested `size`.
  std::vector<int64_t> padded_self(num_new_dimensions, 0);
  padded_self.insert(
      padded_self.end(), self.sizes().begin(), self.sizes().end());
  std::vector<int64_t> target_size(size.size());
  for (const auto idx : c10::irange(size.size())) {
    // -1 means "inherit the size of self" at this index.
    target_size[idx] = size[idx] == -1 ? padded_self[idx] : size[idx];
  }
  return {Shape(self.scalar_type(), target_size)};
}

// Infers the output shape of aten::expand when sizes may be symbolic.
//
// Symbolic entries are resolved to their static (upper-bound) value by
// walking the lazy SymbolicIntNode -> DimensionNode chain. Concrete
// entries follow the same -1 semantics as the IntArrayRef overload,
// except that -1 is rejected for newly-added leading dimensions.
//
// Args:
//   self: the tensor being expanded.
//   size: requested output sizes; entries may be symbolic.
//   implicit: unused here; part of the aten::expand signature.
// Returns: a single-element vector holding the inferred output Shape.
std::vector<Shape> compute_shape_expand(
    const at::Tensor& self,
    c10::SymIntArrayRef size,
    bool implicit) {
  CHECK_GE(static_cast<int64_t>(size.size()), self.dim());
  std::vector<c10::SymInt> _sizes = ToVector<c10::SymInt>(size);
  int64_t num_new_dimensions = _sizes.size() - self.dim();
  // Left-pad self's sizes with zeros so it lines up index-for-index with
  // the requested sizes.
  std::vector<int64_t> padded_self(num_new_dimensions, 0);
  padded_self.insert(
      padded_self.end(), self.sizes().begin(), self.sizes().end());
  std::vector<int64_t> target_size(_sizes.size());
  for (const auto idx : c10::irange(_sizes.size())) {
    if (_sizes[idx].is_symbolic()) {
      // Resolve the symbolic size to its static value via the lazy IR node.
      std::shared_ptr<c10::SymbolicIntNode> symbolicIntNode =
          _sizes[idx].toSymbolicIntNode();
      auto lazySymIntNode =
          std::dynamic_pointer_cast<torch::lazy::SymbolicIntNode>(
              symbolicIntNode);
      // Guard the downcasts: a failed dynamic_pointer_cast would otherwise
      // yield a null dereference (hard crash) instead of a diagnosable error.
      TORCH_CHECK(
          lazySymIntNode, "Expected a lazy SymbolicIntNode in expand sizes");
      auto dim_node = std::dynamic_pointer_cast<torch::lazy::DimensionNode>(
          lazySymIntNode->node_);
      TORCH_CHECK(dim_node, "Expected size node to be a DimensionNode");
      target_size[idx] = dim_node->getStaticValue();
    } else {
      int64_t concrete_size = _sizes[idx].data();
      if (concrete_size == -1) {
        // -1 can't be specified for non-existing (newly added) dimensions.
        TORCH_CHECK(static_cast<int64_t>(idx) >= num_new_dimensions);
        // -1 means "inherit the size of self" at this index.
        target_size[idx] = padded_self[idx];
      } else {
        target_size[idx] = concrete_size;
      }
    }
  }
  return {Shape(self.scalar_type(), target_size)};
}

std::vector<Shape> compute_shape_index_select(
const at::Tensor& self,
int64_t dim,
Expand Down
6 changes: 6 additions & 0 deletions torch/csrc/lazy/core/shape_inference.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,15 @@

#include <ATen/Tensor.h>
#include <c10/core/ScalarType.h>
#include <c10/core/SymInt.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/SymbolicIntNode.h>
#include <c10/macros/Export.h>
#include <c10/util/Optional.h>
#include <torch/csrc/lazy/backend/backend_data.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/shape.h>
#include <torch/csrc/lazy/core/tensor.h>
#include <vector>

namespace torch {
Expand All @@ -30,6 +34,8 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution(const at::Te
TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
// Shape inference for aten::expand with concrete sizes (-1 keeps self's dim).
TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, at::IntArrayRef size, bool implicit);
// Shape inference for aten::expand when sizes may be symbolic (SymInt).
TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_flip(const at::Tensor & self, at::IntArrayRef dims);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
Expand Down

0 comments on commit e69617a

Please sign in to comment.