diff --git a/aten/src/ATen/native/AveragePool2d.cpp b/aten/src/ATen/native/AveragePool2d.cpp
index 9c3c30857efec..83fd83c9d070c 100644
--- a/aten/src/ATen/native/AveragePool2d.cpp
+++ b/aten/src/ATen/native/AveragePool2d.cpp
@@ -90,18 +90,17 @@ void avg_pool2d_out_cpu_template(
   bool ceil_mode,
   bool count_include_pad)
 {
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 2 &&
-                        (stride.empty() || stride.size() == 2) &&
-                        (padding.size() == 1 || padding.size() == 2),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
+              (stride.empty() || stride.size() == 2) &&
+              (padding.size() == 1 || padding.size() == 2),
     "avg_pool2d: all IntArrayRef sizes must be 2");
 
   TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
     "non-empty 2D or 3D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
@@ -236,11 +235,10 @@ Tensor& avg_pool2d_backward_out_cpu_template(
   bool ceil_mode,
   bool count_include_pad)
 {
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 2 &&
-                        (stride.empty() || stride.size() == 2) &&
-                        (padding.size() == 1 || padding.size() == 2),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
+              (stride.empty() || stride.size() == 2) &&
+              (padding.size() == 1 || padding.size() == 2),
     "avg_pool2d: all IntArrayRef sizes must be 2");
 
   const int64_t ndim = input.ndimension();
@@ -249,7 +247,7 @@ Tensor& avg_pool2d_backward_out_cpu_template(
     "non-empty 3D or 4D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
diff --git a/aten/src/ATen/native/AveragePool3d.cpp b/aten/src/ATen/native/AveragePool3d.cpp
index ff9fdf62b4f79..82bce4f7650a2 100644
--- a/aten/src/ATen/native/AveragePool3d.cpp
+++ b/aten/src/ATen/native/AveragePool3d.cpp
@@ -104,19 +104,18 @@ void avg_pool3d_out_cpu_template(
   bool ceil_mode,
   bool count_include_pad)
 {
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 3 &&
-                        (stride.empty() || stride.size() == 3) &&
-                        (padding.size() == 1 || padding.size() == 3),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
+              (stride.empty() || stride.size() == 3) &&
+              (padding.size() == 1 || padding.size() == 3),
     "avg_pool3d: all IntArrayRef sizes must be 3");
 
   TORCH_CHECK((input_.ndimension() == 4 || input_.ndimension() == 5),
     "non-empty 4D or 5D (batch mode) tensor expected for input");
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
@@ -293,19 +292,18 @@ Tensor& avg_pool3d_backward_out_cpu_template(
   bool ceil_mode,
   bool count_include_pad)
 {
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 3 &&
-                        (stride.empty() || stride.size() == 3) &&
-                        (padding.size() == 1 || padding.size() == 3),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
+              (stride.empty() || stride.size() == 3) &&
+              (padding.size() == 1 || padding.size() == 3),
     "avg_pool3d: all IntArrayRef sizes must be 3");
 
   TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
     "non-empty 4D or 5D (batch mode) tensor expected for input");
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
diff --git a/aten/src/ATen/native/cuda/AveragePool2d.cu b/aten/src/ATen/native/cuda/AveragePool2d.cu
index 8d6d117e39e1b..d38b80fc026ac 100644
--- a/aten/src/ATen/native/cuda/AveragePool2d.cu
+++ b/aten/src/ATen/native/cuda/AveragePool2d.cu
@@ -114,18 +114,17 @@ void avg_pool2d_out_cuda_template(
   checkAllSameGPU("avg_pool2d_out_cuda",
                   {output_arg, input_arg});
 
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 2 &&
-                        (stride.empty() || stride.size() == 2) &&
-                        (padding.size() == 1 || padding.size() == 2),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
+              (stride.empty() || stride.size() == 2) &&
+              (padding.size() == 1 || padding.size() == 2),
     "avg_pool2d: all IntArrayRef sizes must be 2");
 
   TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
     "non-empty 3D or 4D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
@@ -230,18 +229,17 @@ Tensor& avg_pool2d_backward_out_cuda_template(
   checkAllSameGPU("avg_pool2d_backward_out_cuda",
                   {gradInput_arg, gradOutput_arg, input_arg});
 
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 2 &&
-                        (stride.empty() || stride.size() == 2) &&
-                        (padding.size() == 1 || padding.size() == 2),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
+              (stride.empty() || stride.size() == 2) &&
+              (padding.size() == 1 || padding.size() == 2),
     "avg_pool2d: all IntArrayRef sizes must be 2");
 
   TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
     "non-empty 3D or 4D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
diff --git a/aten/src/ATen/native/cuda/AveragePool3d.cu b/aten/src/ATen/native/cuda/AveragePool3d.cu
index 439d0ac2a07b5..4d81d98daf7f3 100644
--- a/aten/src/ATen/native/cuda/AveragePool3d.cu
+++ b/aten/src/ATen/native/cuda/AveragePool3d.cu
@@ -312,19 +312,18 @@ void avg_pool3d_out_cuda_template(
   checkAllSameGPU("avg_pool3d_out_cuda",
                   {output_arg, input_arg});
 
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 3 &&
-                        (stride.empty() || stride.size() == 3) &&
-                        (padding.size() == 1 || padding.size() == 3),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
+              (stride.empty() || stride.size() == 3) &&
+              (padding.size() == 1 || padding.size() == 3),
     "avg_pool3d: all IntArrayRef sizes must be 3");
 
   TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
     "non-empty 4D or 5D (batch mode) tensor expected for input");
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
@@ -435,11 +434,10 @@ void avg_pool3d_backward_out_cuda_template(
   checkAllSameGPU("avg_pool3d_backward_out_cuda",
                   {gradInput_arg, gradOutput_arg, input_arg});
 
-  // #20866 [JIT] stride.empty() is passed through
-  // #20866 [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1
-  TORCH_INTERNAL_ASSERT(kernel_size.size() == 3 &&
-                        (stride.empty() || stride.size() == 3) &&
-                        (padding.size() == 1 || padding.size() == 3),
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
+              (stride.empty() || stride.size() == 3) &&
+              (padding.size() == 1 || padding.size() == 3),
     "avg_pool3d: all IntArrayRef sizes must be 3");
 
   TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
@@ -453,8 +451,8 @@ void avg_pool3d_backward_out_cuda_template(
   gradInput.zero_();
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
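Taken together, these hunks let callers of the C++ API pass a one-element `kernel_size`, which is then broadcast to every spatial dimension, matching the scalar convention of the Python API and the treatment `padding` already received. A minimal sketch of the resulting behavior, assuming a libtorch build that includes this patch (the driver program itself is illustrative and not part of the change):

```cpp
#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor input = at::randn({1, 4, 8, 8});

  // With this patch, a one-element kernel_size is expanded to all spatial
  // dims, so {3} should behave exactly like {3, 3}. (stride still defaults
  // to kernel_size when empty; padding of size 1 was already accepted.)
  at::Tensor pooled_single = at::avg_pool2d(input, /*kernel_size=*/{3});
  at::Tensor pooled_pair   = at::avg_pool2d(input, /*kernel_size=*/{3, 3});

  std::cout << std::boolalpha
            << pooled_single.equal(pooled_pair) << std::endl;  // expected: true
  return 0;
}
```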