From e1a101676bc5fa98d7c825e3f4e5523617761924 Mon Sep 17 00:00:00 2001
From: Zafar
Date: Tue, 17 Nov 2020 13:58:37 -0800
Subject: [PATCH] [quant] ReflectionPad2d (#48036)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/48036

This makes reflection_pad2d work on quantized CPU tensors: the existing
CPU kernels are additionally dispatched over the quantized integer
types (AT_DISPATCH_QINT_TYPES), and the output is allocated with the
input's scale and zero point via at::_empty_affine_quantized. Only
per-tensor affine quantization is supported; other schemes are rejected
with an error. The reflection_pad1d dispatch entries in
native_functions.yaml are also consolidated into a single
"CPU, QuantizedCPU" line.
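
Expected usage, roughly (a minimal illustrative sketch, not part of this
patch; shapes and quantization parameters are arbitrary):

    import torch

    x = torch.rand(1, 3, 16, 16)
    qx = torch.quantize_per_tensor(x, 0.1, 0, torch.quint8)

    pad = torch.nn.ReflectionPad2d((2, 2, 2, 2))
    qy = pad(qx)    # runs through the QuantizedCPU dispatch added here
    y_ref = pad(x)  # fp32 reference path

    # Reflection padding only copies existing values, so quantizing the
    # fp32 reference with the same qparams should match exactly.
    assert torch.equal(
        qy.int_repr(),
        torch.quantize_per_tensor(y_ref, 0.1, 0, torch.quint8).int_repr())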

Test Plan: Imported from OSS

Reviewed By: jerryzh168

Differential Revision: D25000347

Pulled By: z-a-f

fbshipit-source-id: f42bf3c6f7069385bc62609cf59d24c15734a058
---
 aten/src/ATen/native/ReflectionPad.cpp     | 61 ++++++++++++++++------
 aten/src/ATen/native/native_functions.yaml |  5 +-
 test/quantization/test_quantized_op.py     | 22 ++++++++
 3 files changed, 70 insertions(+), 18 deletions(-)

diff --git a/aten/src/ATen/native/ReflectionPad.cpp b/aten/src/ATen/native/ReflectionPad.cpp
index 0ad13bd340a1..617f1f03a0d8 100644
--- a/aten/src/ATen/native/ReflectionPad.cpp
+++ b/aten/src/ATen/native/ReflectionPad.cpp
@@ -346,23 +346,43 @@ void reflection_pad2d_out_template(
   if (input.ndimension() == 3) {
     /* resize output */
     output.resize_({nplane, output_h, output_w});
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
-      reflection_pad2d_out_frame(
-        input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
-        nplane,
-        input_w, input_h, output_w, output_h,
-        pad_l, pad_t);
-    });
+    if (input.is_quantized()) {
+      AT_DISPATCH_QINT_TYPES(input.scalar_type(), "qreflection_pad2d", [&] {
+        reflection_pad2d_out_frame(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    } else {
+      AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
+        reflection_pad2d_out_frame(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    }
   } else {
     /* resize output */
     output.resize_({nbatch, nplane, output_h, output_w});
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
-      reflection_pad2d_out_loop(
-        input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
-        nbatch, nplane,
-        input_w, input_h, output_w, output_h,
-        pad_l, pad_t);
-    });
+    if (input.is_quantized()) {
+      AT_DISPATCH_QINT_TYPES(input.scalar_type(), "qreflection_pad2d", [&] {
+        reflection_pad2d_out_loop(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nbatch, nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    } else {
+      AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
+        reflection_pad2d_out_loop(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nbatch, nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    }
   }
 }
 
@@ -547,7 +567,18 @@ Tensor& reflection_pad2d_out_cpu(
 }
 
 Tensor reflection_pad2d_cpu(const Tensor& input, IntArrayRef padding) {
-  auto output = at::empty({0}, input.options());
+  Tensor output;
+  if (input.is_quantized()) {
+    if (input.qscheme() == kPerTensorAffine) {
+      output = at::_empty_affine_quantized({0}, input.options(),
+                                           input.q_scale(),
+                                           input.q_zero_point());
+    } else {
+      TORCH_CHECK(false, "Only per tensor quantization is supported");
+    }
+  } else {
+    output = at::empty({0}, input.options());
+  }
   reflection_pad2d_out_template(output, input, padding);
   return output;
 }
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 0b9627d2e612..49fe4a927730 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -8072,9 +8072,8 @@
   use_c10_dispatcher: full
   python_module: nn
   dispatch:
-    CPU: reflection_pad1d_cpu
+    CPU, QuantizedCPU: reflection_pad1d_cpu
     CUDA: reflection_pad1d_cuda
-    QuantizedCPU: reflection_pad1d_cpu
 
 - func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
@@ -8099,7 +8098,7 @@
   use_c10_dispatcher: full
   python_module: nn
   dispatch:
-    CPU: reflection_pad2d_cpu
+    CPU, QuantizedCPU: reflection_pad2d_cpu
     CUDA: reflection_pad2d_cuda
 
 - func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
diff --git a/test/quantization/test_quantized_op.py b/test/quantization/test_quantized_op.py
index af5312904dec..b569b98a0b8f 100644
--- a/test/quantization/test_quantized_op.py
+++ b/test/quantization/test_quantized_op.py
@@ -4111,6 +4111,28 @@ def test_reflection_pad1d(self, batch_size, channels, width, qtype):
 
         self.assertEqual(qy_ref, qy_hat)
 
+    @given(batch_size=st.integers(1, 64),
+           channels=st.integers(1, 64),
+           height=st.integers(16, 128),
+           width=st.integers(16, 128),
+           qtype=st.sampled_from(hu._ALL_QINT_TYPES))
+    def test_reflection_pad2d(self, batch_size, channels, height, width, qtype):
+        padding = (width // 4, width // 4, height // 4, height // 4)
+
+        x = torch.arange(batch_size * channels * height * width).to(torch.float)
+        x = x.resize(batch_size, channels, height, width)
+        # Per-Tensor test
+        scale, zp = _calculate_dynamic_qparams(x, qtype)
+        qx = torch.quantize_per_tensor(x, scale, zp, qtype)
+
+        padding_op = torch.nn.ReflectionPad2d(padding)
+
+        y_ref = padding_op(x)
+        qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
+        qy_hat = padding_op(qx)
+
+        self.assertEqual(qy_ref, qy_hat)
+
     @given(batch_size=st.integers(1, 64),
            channels=st.integers(1, 64),
            hwd=st.integers(1, 16),  # For 3D, max input size would be 16x16x16
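
Note: as the TORCH_CHECK in reflection_pad2d_cpu above indicates, only
per-tensor affine quantization is handled. A minimal sketch of the
error path (illustrative values; assumes a per-channel quantized input
reaches reflection_pad2d_cpu through the same QuantizedCPU dispatch):

    import torch

    x = torch.randn(1, 2, 8, 8)
    qx = torch.quantize_per_channel(
        x, scales=torch.tensor([0.1, 0.2]),
        zero_points=torch.tensor([0, 0]), axis=1, dtype=torch.quint8)

    try:
        torch.nn.ReflectionPad2d(2)(qx)
    except RuntimeError as e:
        print(e)  # expected: "Only per tensor quantization is supported"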