diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index 7b4764d2397af..8c1bf3facde73 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -1598,6 +1598,20 @@ Tensor any_dims_default(const Tensor &self, OptionalIntArrayRef dim, bool keepdi
   return allany_dims_default(self, dim, keepdim);
 }
 
+Tensor& all_dims_out_default(
+    const Tensor &self, OptionalIntArrayRef dim, bool keepdim, Tensor &result) {
+  auto tmp = self.all(dim, keepdim);
+  at::native::resize_output(result, tmp.sizes());
+  return result.copy_(tmp);
+}
+
+Tensor& any_dims_out_default(
+    const Tensor &self, OptionalIntArrayRef dim, bool keepdim, Tensor &result) {
+  auto tmp = self.any(dim, keepdim);
+  at::native::resize_output(result, tmp.sizes());
+  return result.copy_(tmp);
+}
+
 TORCH_IMPL_FUNC(amin_out)
 (const Tensor& self, IntArrayRef dim, bool keepdim, const Tensor& result) {
   auto iter = meta::make_reduction(self, result, dim, keepdim, self.scalar_type());
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 2e157824cca8c..152105fa58657 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -701,6 +701,7 @@
   structured: True
   dispatch:
     CPU, CUDA: all_dims_out
+    CompositeExplicitAutograd: all_dims_out_default
   cpp_no_default_args: ['dim']
 
 - func: all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
@@ -743,6 +744,7 @@
   structured: True
   dispatch:
     CPU, CUDA: any_dims_out
+    CompositeExplicitAutograd: any_dims_out_default
   cpp_no_default_args: ['dim']
 
 - func: any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor
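
For context, a minimal Python-level sketch of the `out=` overloads these composite fallbacks cover; tensor names and shapes here are illustrative, assuming a build that includes this change:

```python
import torch

x = torch.rand(2, 3, 4) > 0.5
out = torch.empty(0, dtype=torch.bool)

# all.dims_out / any.dims_out with a dim list: on backends without a dedicated
# kernel, the CompositeExplicitAutograd fallback computes the reduction via
# self.all()/self.any(), resizes `out` to the result's shape, and copies into it.
torch.all(x, dim=(0, 2), keepdim=True, out=out)
torch.any(x, dim=(0, 2), keepdim=True, out=out)
```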