Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【Hackathon 6th No.26】为 paddle.view 进行功能增强 #64205

Merged
merged 17 commits into from
May 17, 2024
27 changes: 26 additions & 1 deletion paddle/phi/kernels/stride/view_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,32 @@ void ViewShapeKernel(const Context& dev_ctx,
const DenseTensor& input,
const std::vector<int64_t>& dims,
DenseTensor* out) {
DDim new_dims = DDim(dims.data(), static_cast<int>(dims.size()));
// infer dims
auto infer_dim = -1;
auto new_size = 1;
auto numel = input.numel();
std::vector<int64_t> dims_copy = dims;
for (int dim = 0, ndim = dims_copy.size(); dim < ndim; ++dim) {
if (dims_copy[dim] == -1) {
if (infer_dim >= 0) {
PADDLE_THROW(phi::errors::Fatal("Only one dimension can be inferred"));
}
infer_dim = dim;
} else if (dims_copy[dim] >= 0) {
new_size *= dims_copy[dim];
} else {
yinfan98 marked this conversation as resolved.
Show resolved Hide resolved
PADDLE_THROW(phi::errors::OutOfRange("Tensor idx is out of range"));
}
}
PADDLE_ENFORCE_NE(new_size,
0,
phi::errors::Unavailable(
"cannot reshape tensor of 0 elements into shape "));
if (infer_dim >= 0 && new_size > 0 && numel % new_size == 0) {
dims_copy[infer_dim] = numel / new_size;
}

DDim new_dims = DDim(dims_copy.data(), static_cast<int>(dims_copy.size()));
DDim stride;
if (ReshapeStride(input.dims(), input.strides(), new_dims, stride)) {
auto meta = input.meta();
Expand Down
18 changes: 18 additions & 0 deletions python/paddle/tensor/manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -6401,6 +6401,24 @@ def view(x, shape_or_dtype, name=None):
>>> print(out.shape)
[2, 4, 24]

>>> import paddle
>>> paddle.base.set_flags({"FLAGS_use_stride_kernel": True})

>>> x = paddle.rand([2, 4, 6], dtype="float32")

>>> out = paddle.view(x, [8, -1])
>>> print(out.shape)
[8, 6]

>>> import paddle
>>> paddle.base.set_flags({"FLAGS_use_stride_kernel": True})

>>> x = paddle.rand([2, 4, 6], dtype="float32")

>>> out = paddle.view(x, paddle.uint8)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This line of code cannot run (stray trailing quote: `paddle.uint8"`).

>>> print(out.shape)
[2, 4, 24]

"""
if isinstance(shape_or_dtype, (list, tuple)):
return _C_ops.view_shape(x, shape_or_dtype)
Expand Down
43 changes: 43 additions & 0 deletions test/deprecated/legacy_test/test_stride.py
Original file line number Diff line number Diff line change
Expand Up @@ -556,6 +556,47 @@ def call_view2(self):

self.assertTrue(out_c._is_shared_buffer_with(out))

def call_view3(self):
    """Check paddle.view shape inference: a -1 entry in the target shape
    is inferred from the tensor's element count (4-D [10,10,10,20] viewed
    as [10, 100, -1] must yield [10, 100, 20]).

    Also verifies that the view shares its buffer with the source tensor
    and stays contiguous, both before and after an explicit .contiguous().
    """
    # NOTE(review): the scraped source had GitHub review-UI text embedded
    # inside this method body; it has been removed to keep the file valid.
    x_np = np.random.random(size=[10, 10, 10, 20]).astype('float32')
    x = paddle.to_tensor(x_np)
    np.testing.assert_allclose(x.numpy(), x_np)

    # shape inference: -1 is resolved to 20 (10*10*10*20 / (10*100))
    out = paddle.view(x, [10, 100, -1])
    np_out = x_np.reshape(10, 100, 20)

    np.testing.assert_allclose(out.numpy(), np_out)

    self.assertTrue(out.is_contiguous())

    # view must alias the original storage, not copy it
    self.assertTrue(x._is_shared_buffer_with(out))

    out_c = out.contiguous()

    np.testing.assert_allclose(out_c.numpy(), np_out)

    # contiguous() on an already-contiguous view should not reallocate
    self.assertTrue(out_c._is_shared_buffer_with(out))

def call_view4(self):
    """Check paddle.view dtype reinterpretation: a float32 tensor viewed
    as uint8 must match numpy's ndarray.view(np.uint8), share its buffer
    with the source, and remain contiguous.
    """
    base_np = np.random.random(size=[10, 10, 10, 20]).astype('float32')
    base = paddle.to_tensor(base_np)
    np.testing.assert_allclose(base.numpy(), base_np)

    # Reinterpret the raw bytes as uint8; numpy's view is the reference.
    viewed = paddle.view(base, paddle.uint8)
    expected = base_np.view(np.uint8)

    np.testing.assert_allclose(viewed.numpy(), expected)

    self.assertTrue(viewed.is_contiguous())

    self.assertTrue(base._is_shared_buffer_with(viewed))

    materialized = viewed.contiguous()

    np.testing.assert_allclose(materialized.numpy(), expected)

    self.assertTrue(materialized._is_shared_buffer_with(viewed))

def call_view_as(self):
x_np = np.random.random(size=[10, 10, 10, 20]).astype('float32')
x = paddle.to_tensor(x_np)
Expand Down Expand Up @@ -618,6 +659,8 @@ def call_stride(self):
self.call_as_strided()
self.call_view()
self.call_view2()
self.call_view3()
self.call_view4()
self.call_view_as()
self.call_unfold()

Expand Down