Skip to content

Commit

Permalink
fix aminmax output resize issue when input is a zero dimension tensor
Browse files Browse the repository at this point in the history
ghstack-source-id: 0287fe3a6441d97de847d78e16aea8ea4fe82b7e
Pull Request resolved: #96171
  • Loading branch information
mingfeima committed Mar 7, 2023
1 parent cc775fb commit d9a77d9
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 10 deletions.
8 changes: 8 additions & 0 deletions aten/src/ATen/native/cpu/TensorCompareKernel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,14 @@ static void aminmax_kernel(
"Expect min and max dtype ", self.scalar_type(),
" but got ", min_result.scalar_type(), " and ", max_result.scalar_type());

if (self.numel() == 1 && self.ndimension() == 0) {
min_result.resize_({});
max_result.resize_({});
min_result.fill_(self);
max_result.fill_(self);
return;
}

AT_DISPATCH_ALL_TYPES_AND(ScalarType::Bool, self.scalar_type(), "aminmax_cpu", [&] {
compare_base_kernel<scalar_t, scalar_t>(min_result, max_result, self, wrap_dim, keepdim, [&] (
scalar_t* min_result_data, scalar_t* max_result_data,
Expand Down
10 changes: 0 additions & 10 deletions torch/_decomp/decompositions.py
Original file line number Diff line number Diff line change
Expand Up @@ -3337,16 +3337,6 @@ def upsample_bicubic2d_vec(
def aminmax(self, *, dim=None, keepdim=False):
    """Decomposition of ``torch.aminmax`` into ``amin`` + ``amax``.

    Args:
        self: input tensor.
        dim: dimension(s) to reduce over; ``None`` reduces over all dims.
        keepdim: whether reduced dimensions are retained with size 1.

    Returns:
        Tuple ``(amin, amax)`` of the elementwise minimum and maximum
        along ``dim``, matching eager ``torch.aminmax`` semantics.

    Note:
        The former CPU-only special case that expanded 0-D results to
        shape ``[1]`` (a workaround for
        https://github.com/pytorch/pytorch/issues/96042) is removed:
        the kernel-side fix makes aminmax on a 0-D tensor return 0-D
        outputs, identical to amin/amax, so the expand would now
        produce a wrong shape.
    """
    amin = torch.amin(self, dim=dim, keepdim=keepdim)
    amax = torch.amax(self, dim=dim, keepdim=keepdim)
    return amin, amax


Expand Down

0 comments on commit d9a77d9

Please sign in to comment.