Move ConstantPadNd into ATen #10885

Closed · 19 commits
1 change: 1 addition & 0 deletions aten/src/ATen/core/aten_interned_strings.h
@@ -241,6 +241,7 @@ _(aten, clamp_max) \
_(aten, clamp_min) \
_(aten, clone) \
_(aten, coalesce) \
_(aten, constant_pad_nd) \
_(aten, contiguous) \
_(aten, conv1d) \
_(aten, conv2d) \
74 changes: 74 additions & 0 deletions aten/src/ATen/native/ConstantPadNd.cpp
@@ -0,0 +1,74 @@
#include "ATen/ATen.h"

namespace at { namespace native {

Tensor constant_pad_nd(const Tensor& self, IntList pad, Scalar value) {
AT_CHECK(pad.size() % 2 == 0, "Length of pad must be even but instead it equals ",
pad.size());

auto input_sizes = self.sizes();
auto l_inp = self.dim();

auto l_pad = pad.size() / 2;
auto l_diff = l_inp - l_pad;
AT_CHECK(l_inp >= l_pad, "Length of pad should be no more than twice the number of "
"dimensions of the input. Pad length is ", pad.size(), " while the input has ",
l_inp, " dimensions.");

std::vector<int64_t> new_shape;

bool all_pads_non_positive = true;

auto c_input = self;
for (int i = l_diff; i < l_inp; i++) {
auto pad_idx = 2 * (l_inp - i - 1);
if (pad[pad_idx] < 0) {
c_input = c_input.narrow(i, -pad[pad_idx], c_input.size(i) + pad[pad_idx]);
} else if (pad[pad_idx] != 0) {
all_pads_non_positive = false;
}
if (pad[pad_idx + 1] < 0) {
c_input = c_input.narrow(i, 0, c_input.size(i) + pad[pad_idx + 1]);
} else if (pad[pad_idx + 1] != 0) {
all_pads_non_positive = false;
}
}

// If none of the pads are positive, we can optimize and just return the result
// of calling .narrow() on the input.
if (all_pads_non_positive) {
return c_input;
}


for (int i = 0; i < l_diff; i++) {
new_shape.emplace_back(input_sizes[i]);
}

for (int i = 0; i < l_pad; i++) {
auto pad_idx = pad.size() - ((i + 1) * 2);
auto new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1];
AT_CHECK(new_dim > 0, "The input size ", input_sizes[l_diff + i], ", plus negative padding ",
pad[pad_idx], " and ", pad[pad_idx + 1], " resulted in a negative output size, "
"which is invalid. Check dimension ", l_diff + i, " of your input.");
new_shape.emplace_back(new_dim);
}

auto output = at::empty(new_shape, self.options());
output.fill_(value);

auto c_output = output;
for (int i = l_diff; i < l_inp; i++) {
auto pad_idx = 2 * (l_inp - i - 1);
if (pad[pad_idx] > 0) {
c_output = c_output.narrow(i, pad[pad_idx], c_output.size(i) - pad[pad_idx]);
}
if (pad[pad_idx + 1] > 0) {
c_output = c_output.narrow(i, 0, c_output.size(i) - pad[pad_idx + 1]);
}
}
c_output.copy_(c_input);

return output;
}

}} // namespace at::native
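
For reference, a minimal usage sketch of the semantics implemented above (not part of the diff; plain Python through the public F.pad entry point, which dispatches to constant_pad_nd for mode='constant'): positive entries in pad grow a dimension and are filled with value, while negative entries narrow it, hitting the all-pads-non-positive fast path.

import torch
import torch.nn.functional as F

x = torch.arange(6.).reshape(2, 3)

# Positive pads grow the last dimension: one column of value on the left,
# two on the right -> shape (2, 6).
y = F.pad(x, (1, 2), mode='constant', value=0)
assert y.shape == (2, 6)

# Negative pads narrow instead of padding; with no positive pads the kernel
# just returns the narrowed input.
z = F.pad(x, (-1, 0), mode='constant', value=0)
assert z.shape == (2, 2)
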
3 changes: 3 additions & 0 deletions aten/src/ATen/native/native_functions.yaml
@@ -396,6 +396,9 @@
- func: cudnn_is_acceptable(Tensor self) -> bool
device_guard: false

- func: constant_pad_nd(Tensor self, IntList pad, Scalar value=0) -> Tensor
variants: function

- func: contiguous(Tensor self) -> Tensor
variants: method

3 changes: 3 additions & 0 deletions tools/autograd/derivatives.yaml
@@ -842,6 +842,9 @@
- name: _trilinear(Tensor i1, Tensor i2, Tensor i3, IntList expand1, IntList expand2, IntList expand3, IntList sumdim, int64_t unroll_dim)
i1, i2, i3: _trilinear_backward(grad, i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim, grad_input_mask)

- name: constant_pad_nd(Tensor self, IntList pad, Scalar value)
self: constant_pad_nd_backward(grad, pad)

- name: binary_cross_entropy_forward(Tensor self, Tensor target, Tensor weight, int64_t reduction)
self: binary_cross_entropy_backward(grad, self, target, weight, reduction)

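
As a quick numerical sanity check of the derivative registered above, one could run torch.autograd.gradcheck against F.pad (illustrative only; not part of the PR's tests):

import torch
import torch.nn.functional as F
from torch.autograd import gradcheck

x = torch.randn(2, 3, dtype=torch.double, requires_grad=True)
# Mixed positive/negative pads exercise both the padding and the narrowing paths.
assert gradcheck(lambda t: F.pad(t, (1, -1), mode='constant', value=0.5), (x,))
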
8 changes: 8 additions & 0 deletions tools/autograd/templates/Functions.cpp
@@ -17,6 +17,7 @@
#include <ciso646>
#include <algorithm>
#include <numeric>
#include <functional>

// ${generated_comment}

@@ -2007,6 +2008,13 @@ Tensor sparse_constructor_values_backward(const Tensor& sparse_grad_out, const T
return flattened_dense_grad.index_select(0, flattened_indices);
}

// The backward of pad(input, pads) is just pad(grad_output, [-p for p in pads]).
Tensor constant_pad_nd_backward(const Tensor& grad, IntList pad) {
auto negated_pad = pad.vec();
std::transform(negated_pad.cbegin(), negated_pad.cend(), negated_pad.begin(), std::negate<int64_t>());
return at::constant_pad_nd(grad, negated_pad, 0);
}

} // anonymous namespace

${autograd_function_definitions}
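
The comment above captures the whole backward rule: padding the incoming gradient with the negated pads slices it back to the input region. A small sketch checking that identity through the Python API (illustrative, not part of the diff):

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, dtype=torch.double, requires_grad=True)
pad = (1, 2)
y = F.pad(x, pad, mode='constant', value=0)

g = torch.randn_like(y)  # some upstream gradient
y.backward(g)

# pad(grad_output, [-p for p in pads]) is a pure narrow here, and it matches
# the gradient autograd routes back to x.
expected = F.pad(g, [-p for p in pad], mode='constant', value=0)
assert torch.equal(x.grad, expected)
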
75 changes: 0 additions & 75 deletions torch/nn/_functions/padding.py

This file was deleted.

3 changes: 1 addition & 2 deletions torch/nn/functional.py
@@ -10,7 +10,6 @@
from torch._C import _infer_size, _add_docstr
from . import _functions
from .modules import utils
from ._functions.padding import ConstantPadNd
from ._functions import vision
from ._functions.thnn.fold import Col2Im, Im2Col
from .modules.utils import _single, _pair, _triple, _list_with_default
@@ -2296,7 +2295,7 @@ def pad(input, pad, mode='constant', value=0):
assert len(pad) % 2 == 0, 'Padding length must be divisible by 2'
assert len(pad) // 2 <= input.dim(), 'Padding length too large'
if mode == 'constant':
return ConstantPadNd.apply(input, pad, value)
return _VF.constant_pad_nd(input, pad, value)
else:
assert value == 0, 'Padding mode "{}" doesn\'t take in value argument'.format(mode)
if input.dim() == 3:
8 changes: 8 additions & 0 deletions torch/onnx/symbolic.py
@@ -597,6 +597,14 @@ def adaptive_max_pool2d(g, input, output_size):
return g.op("GlobalMaxPool", input), None


@parse_args('v', 'is', 'f')
def constant_pad_nd(g, input, padding, value):
from torch.autograd._functions.utils import prepare_onnx_paddings
mode = "constant"
paddings = prepare_onnx_paddings(len(input.type().sizes()), padding)
return g.op("Pad", input, pads_i=paddings, mode_s=mode, value_f=value)


@parse_args('v', 'is')
def reflection_pad(g, input, padding):
from torch.autograd._functions.utils import prepare_onnx_paddings
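
For context on prepare_onnx_paddings: PyTorch lists pads as (begin, end) pairs starting from the last dimension, while the ONNX Pad op expects the begins for every axis followed by the ends for every axis. A hypothetical re-implementation of that reordering (the helper name is assumed for illustration; the real logic lives in torch.autograd._functions.utils):

def reorder_pads_for_onnx(ndim, pad):
    # Zero-fill pads for dimensions the caller did not mention, then split the
    # (begin, end) pairs and reverse them so axes run first-to-last.
    full = list(pad) + [0] * (2 * ndim - len(pad))
    begins = full[0::2][::-1]
    ends = full[1::2][::-1]
    return begins + ends

# Padding only the last of three dimensions by (1, 2):
assert reorder_pads_for_onnx(3, [1, 2]) == [0, 0, 1, 0, 0, 2]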