From 76104b3f30c12e030bac8272b946c18753422332 Mon Sep 17 00:00:00 2001
From: Lucy Qiu
Date: Fri, 7 Nov 2025 10:49:13 -0800
Subject: [PATCH] Do not copy beyond out tensor bounds (#15648)

Summary:
`set_all_to_value(out_data, out_step_len, value)` is called once per padded
row, advancing `out_data` by `out_step_len` each iteration. Add checks to
make sure we never copy past the end of the out tensor (`out_data`).

Reviewed By: JacobSzwejbka

Differential Revision: D86436435
---
 kernels/portable/cpu/op_constant_pad_nd.cpp | 38 ++++++++++++++++++++++++++++++++++----
 1 file changed, 34 insertions(+), 4 deletions(-)

diff --git a/kernels/portable/cpu/op_constant_pad_nd.cpp b/kernels/portable/cpu/op_constant_pad_nd.cpp
index 7da10456e58..7209e8e42e5 100644
--- a/kernels/portable/cpu/op_constant_pad_nd.cpp
+++ b/kernels/portable/cpu/op_constant_pad_nd.cpp
@@ -30,6 +30,7 @@ void set_all_to_value(CTYPE* out_data, size_t step_len, CTYPE value) {
 
 template <typename CTYPE>
 void apply_padding_to_dim(
+    KernelRuntimeContext& ctx,
     size_t ndim,
     const CTYPE* self_data,
     IntArrayRef self_sizes,
@@ -57,7 +58,20 @@ void apply_padding_to_dim(
   size_t out_step_len = out_strides[dim];
   size_t in_step_len = self_strides[dim];
 
-  for ([[maybe_unused]] const auto i : c10::irange(pad_before)) {
+  // Do not copy padding beyond the out tensor bounds.
+  if (pad_before > 0) {
+    size_t numel = 1;
+    for (ET_UNUSED const auto i : c10::irange(out_sizes.size())) {
+      numel *= out_sizes[i];
+    }
+    ET_KERNEL_CHECK_MSG(
+        ctx,
+        numel >= pad_before * out_step_len,
+        InvalidArgument,
+        /* void */,
+        "Out tensor is too small for the requested padding.");
+  }
+  for (ET_UNUSED const auto i : c10::irange(pad_before)) {
     set_all_to_value(out_data, out_step_len, value);
     out_data += out_step_len;
   }
@@ -76,8 +90,9 @@ void apply_padding_to_dim(
   }
   // Otherwise, call this function recursively
   else {
-    for ([[maybe_unused]] const auto i : c10::irange(self_sizes[dim])) {
+    for (ET_UNUSED const auto i : c10::irange(self_sizes[dim])) {
       apply_padding_to_dim(
+          ctx,
           ndim,
           self_data,
           self_sizes,
@@ -95,7 +110,20 @@ void apply_padding_to_dim(
     }
   }
 
-  for ([[maybe_unused]] const auto i : c10::irange(pad_after)) {
+  // Do not copy padding beyond the out tensor bounds.
+  if (pad_after > 0) {
+    size_t numel = 1;
+    for (ET_UNUSED const auto i : c10::irange(out_sizes.size())) {
+      numel *= out_sizes[i];
+    }
+    ET_KERNEL_CHECK_MSG(
+        ctx,
+        numel >= pad_after * out_step_len,
+        InvalidArgument,
+        /* void */,
+        "Out tensor is too small for the requested padding.");
+  }
+  for (ET_UNUSED const auto i : c10::irange(pad_after)) {
     set_all_to_value(out_data, out_step_len, value);
     out_data += out_step_len;
   }
@@ -103,6 +131,7 @@
 
 template <typename CTYPE>
 void constant_pad_nd_out_impl(
+    KernelRuntimeContext& ctx,
     const Tensor& self,
     IntArrayRef pad,
     CTYPE value_v,
@@ -145,6 +174,7 @@
   IntArrayRef out_strides_ref(out_strides, ndim);
 
   apply_padding_to_dim(
+      ctx,
       ndim,
       self_data,
       self_sizes_ref,
@@ -192,7 +222,7 @@ Tensor& constant_pad_nd_out(
         utils::internal::check_overflow_scalar_cast<CTYPE>(value);
     ET_KERNEL_CHECK(ctx, opt_value_casted.has_value(), InvalidArgument, );
     auto value_casted = opt_value_casted.value();
-    constant_pad_nd_out_impl<CTYPE>(in, pad, value_casted, out);
+    constant_pad_nd_out_impl<CTYPE>(ctx, in, pad, value_casted, out);
   });
 
   return out;
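
Note for reviewers, appended after the diff and not part of the patch: the
guard added in both padding loops reduces to a single comparison. Below is a
minimal standalone C++ sketch of that comparison; the names `pad_fits`,
`out_numel`, `pad_rows`, and `row_len` are hypothetical and do not appear in
the kernel.

    #include <cstddef>

    // A padding write of `pad_rows` rows, each `row_len` elements long,
    // stays inside the destination buffer only if the buffer holds at
    // least pad_rows * row_len elements in total.
    inline bool pad_fits(size_t out_numel, size_t pad_rows, size_t row_len) {
      return out_numel >= pad_rows * row_len;
    }

In the kernel, `out_numel` corresponds to the product of `out_sizes`,
`pad_rows` to `pad_before` or `pad_after`, and `row_len` to
`out_strides[dim]`; when the comparison fails, `ET_KERNEL_CHECK_MSG` flags
InvalidArgument and returns instead of letting the loop write past the end
of `out_data`.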