[quant] ReflectionPad2d (#48036)
Summary: Pull Request resolved: #48036

Test Plan: Imported from OSS

Reviewed By: jerryzh168

Differential Revision: D25000347

Pulled By: z-a-f

fbshipit-source-id: f42bf3c6f7069385bc62609cf59d24c15734a058
z-a-f authored and facebook-github-bot committed Nov 17, 2020
1 parent cb046f7 commit e1a1016
Showing 3 changed files with 70 additions and 18 deletions.
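
For context, this change makes torch.nn.ReflectionPad2d usable on per-tensor quantized CPU tensors. A minimal usage sketch, assuming a PyTorch build that includes this commit:

import torch

# Quantize a float NCHW tensor with per-tensor affine parameters
# (the new kernel only supports per-tensor quantization).
x = torch.randn(1, 3, 8, 8)
qx = torch.quantize_per_tensor(x, scale=0.05, zero_point=0, dtype=torch.quint8)

# ReflectionPad2d now dispatches quantized CPU inputs to the same kernel as float inputs.
pad = torch.nn.ReflectionPad2d(2)  # pad 2 on all four sides
qy = pad(qx)

# The output stays quantized and keeps the input's scale and zero_point.
print(qy.shape, qy.q_scale(), qy.q_zero_point())
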
aten/src/ATen/native/ReflectionPad.cpp (61 changes: 46 additions & 15 deletions)
@@ -346,23 +346,43 @@ void reflection_pad2d_out_template(
   if (input.ndimension() == 3) {
     /* resize output */
     output.resize_({nplane, output_h, output_w});
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
-      reflection_pad2d_out_frame(
-        input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
-        nplane,
-        input_w, input_h, output_w, output_h,
-        pad_l, pad_t);
-    });
+    if (input.is_quantized()) {
+      AT_DISPATCH_QINT_TYPES(input.scalar_type(), "qreflection_pad2d", [&] {
+        reflection_pad2d_out_frame(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    } else {
+      AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
+        reflection_pad2d_out_frame(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    }
   } else {
     /* resize output */
     output.resize_({nbatch, nplane, output_h, output_w});
-    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
-      reflection_pad2d_out_loop(
-        input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
-        nbatch, nplane,
-        input_w, input_h, output_w, output_h,
-        pad_l, pad_t);
-    });
+    if (input.is_quantized()) {
+      AT_DISPATCH_QINT_TYPES(input.scalar_type(), "qreflection_pad2d", [&] {
+        reflection_pad2d_out_loop(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nbatch, nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    } else {
+      AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "reflection_pad2d", [&] {
+        reflection_pad2d_out_loop(
+          input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
+          nbatch, nplane,
+          input_w, input_h, output_w, output_h,
+          pad_l, pad_t);
+      });
+    }
   }
 }

@@ -547,7 +567,18 @@ Tensor& reflection_pad2d_out_cpu(
 }
 
 Tensor reflection_pad2d_cpu(const Tensor& input, IntArrayRef padding) {
-  auto output = at::empty({0}, input.options());
+  Tensor output;
+  if (input.is_quantized()) {
+    if (input.qscheme() == kPerTensorAffine) {
+      output = at::_empty_affine_quantized({0}, input.options(),
+                                           input.q_scale(),
+                                           input.q_zero_point());
+    } else {
+      TORCH_CHECK(false, "Only per tensor quantization is supported");
+    }
+  } else {
+    output = at::empty({0}, input.options());
+  }
   reflection_pad2d_out_template(output, input, padding);
   return output;
 }
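
The quantized branch above allocates the output with the input's scale and zero_point, and only the per-tensor affine scheme is accepted; anything else trips the TORCH_CHECK. A small sketch of the expected Python-level behavior, assuming a build with this change (the exact error text may differ):

import torch

x = torch.randn(1, 3, 8, 8)
pad = torch.nn.ReflectionPad2d(2)

# Per-tensor quantized input: supported; the output reuses scale/zero_point.
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
print(pad(qx).q_scale())  # ~0.1

# Per-channel quantized input: expected to be rejected by the check above.
scales = torch.full((3,), 0.1)
zero_points = torch.zeros(3, dtype=torch.int64)
qx_pc = torch.quantize_per_channel(x, scales, zero_points, axis=1, dtype=torch.quint8)
try:
    pad(qx_pc)
except RuntimeError as err:
    print("rejected:", err)  # "Only per tensor quantization is supported"
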
aten/src/ATen/native/native_functions.yaml (5 changes: 2 additions & 3 deletions)
@@ -8072,9 +8072,8 @@
   use_c10_dispatcher: full
   python_module: nn
   dispatch:
-    CPU: reflection_pad1d_cpu
+    CPU, QuantizedCPU: reflection_pad1d_cpu
     CUDA: reflection_pad1d_cuda
-    QuantizedCPU: reflection_pad1d_cpu
 
 - func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
   python_module: nn
@@ -8099,7 +8098,7 @@
   use_c10_dispatcher: full
   python_module: nn
   dispatch:
-    CPU: reflection_pad2d_cpu
+    CPU, QuantizedCPU: reflection_pad2d_cpu
     CUDA: reflection_pad2d_cuda
 
 - func: reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
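
With reflection_pad2d_cpu registered for both the CPU and QuantizedCPU dispatch keys, the functional entry point should reach the same kernel for quantized inputs as it does for float ones. A hedged sketch, assuming torch.nn.functional.pad routes 4-D 'reflect' padding through reflection_pad2d as in current PyTorch:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 16, 16)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=128, dtype=torch.quint8)

# (left, right, top, bottom) padding; each entry must be smaller than the
# corresponding input dimension for reflection padding.
qy = F.pad(qx, (4, 4, 4, 4), mode='reflect')
print(qy.dtype, qy.shape)  # torch.quint8 torch.Size([2, 3, 24, 24])
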
test/quantization/test_quantized_op.py (22 changes: 22 additions & 0 deletions)
@@ -4111,6 +4111,28 @@ def test_reflection_pad1d(self, batch_size, channels, width, qtype):

         self.assertEqual(qy_ref, qy_hat)
 
+    @given(batch_size=st.integers(1, 64),
+           channels=st.integers(1, 64),
+           height=st.integers(16, 128),
+           width=st.integers(16, 128),
+           qtype=st.sampled_from(hu._ALL_QINT_TYPES))
+    def test_reflection_pad2d(self, batch_size, channels, height, width, qtype):
+        padding = (width // 4, width // 4, height // 4, height // 4)
+
+        x = torch.arange(batch_size * channels * height * width).to(torch.float)
+        x = x.resize(batch_size, channels, height, width)
+        # Per-Tensor test
+        scale, zp = _calculate_dynamic_qparams(x, qtype)
+        qx = torch.quantize_per_tensor(x, scale, zp, qtype)
+
+        padding_op = torch.nn.ReflectionPad2d(padding)
+
+        y_ref = padding_op(x)
+        qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
+        qy_hat = padding_op(qx)
+
+        self.assertEqual(qy_ref, qy_hat)
+
     @given(batch_size=st.integers(1, 64),
            channels=st.integers(1, 64),
            hwd=st.integers(1, 16),  # For 3D, max input size would be 16x16x16
