Fix security vulnerability with SpaceToBatchNDOp.
PiperOrigin-RevId: 445527615
sagunb authored and tensorflower-gardener committed Apr 29, 2022
1 parent 98f021b commit acd56b8
Showing 9 changed files with 90 additions and 13 deletions.
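The fix applies the same pattern in the XLA kernel, the CPU kernel, and shape inference: compute dimension products with MultiplyWithoutOverflow from tensorflow/core/util/overflow.h, which the changed code treats as signalling overflow with a negative result, and reject that result before it is turned into an output shape. Below is a minimal standalone sketch of that idea, written only for illustration; CheckedMultiply is a name invented here and is not the helper the commit uses.

// Minimal sketch of the overflow-checked multiply pattern used by this
// commit. CheckedMultiply is hypothetical; the real helper is
// MultiplyWithoutOverflow in tensorflow/core/util/overflow.h.
#include <cstdint>
#include <iostream>
#include <limits>

// Returns x * y, or -1 if either input is negative or the product would not
// fit in int64. Unsigned arithmetic keeps the overflow check well-defined.
int64_t CheckedMultiply(int64_t x, int64_t y) {
  if (x < 0 || y < 0) return -1;  // callers validate sign separately
  const uint64_t ux = static_cast<uint64_t>(x);
  const uint64_t uy = static_cast<uint64_t>(y);
  const uint64_t limit =
      static_cast<uint64_t>(std::numeric_limits<int64_t>::max());
  if (ux != 0 && uy > limit / ux) return -1;
  return static_cast<int64_t>(ux * uy);
}

int main() {
  // Caller-side pattern in the patched kernels: multiply, then treat a
  // negative result as an invalid-argument error instead of building a shape.
  const int64_t batch = 10;
  const int64_t block_product = CheckedMultiply(1879048192, 1879048192);
  const int64_t output_dim = CheckedMultiply(batch, block_product);
  if (output_dim < 0) {
    std::cout << "Negative output dimension size caused by overflow\n";
  }
  return 0;
}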
24 changes: 24 additions & 0 deletions tensorflow/compiler/tests/spacetobatch_op_test.py
@@ -17,6 +17,7 @@
import numpy as np

from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
@@ -145,6 +146,29 @@ def testLargerInputBatch2x2(self):
self._testOne(x_np, block_size, x_out)


class SpaceToBatchNDErrorHandlingTest(xla_test.XLATestCase):

def testInvalidBlockShape(self):
with self.assertRaisesRegex(ValueError, "block_shape must be positive"):
with self.session() as sess, self.test_scope():
tf_in = constant_op.constant(
-3.5e+35, shape=[10, 20, 20], dtype=dtypes.float32)
block_shape = constant_op.constant(-10, shape=[2], dtype=dtypes.int64)
paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
sess.run(array_ops.space_to_batch_nd(tf_in, block_shape, paddings))

def testOutputSizeOutOfBounds(self):
with self.assertRaisesRegex(ValueError,
"Negative.* dimension size caused by overflow"):
with self.session() as sess, self.test_scope():
tf_in = constant_op.constant(
-3.5e+35, shape=[10, 19, 22], dtype=dtypes.float32)
block_shape = constant_op.constant(
1879048192, shape=[2], dtype=dtypes.int64)
paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
sess.run(array_ops.space_to_batch_nd(tf_in, block_shape, paddings))


class SpaceToBatchNDTest(xla_test.XLATestCase):
"""Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops."""

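The constants in testOutputSizeOutOfBounds are chosen so that only the final multiplication overflows: 1879048192 squared is about 3.53e18, which still fits in int64 (max about 9.22e18), while multiplying by the batch size of 10 pushes the product to roughly 3.53e19. A small standalone check of that arithmetic, written only for illustration and not part of the commit:

// Hypothetical standalone check showing that the testOutputSizeOutOfBounds
// constants overflow int64 only at the final batch multiplication.
#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  const int64_t batch = 10;          // input batch dimension in the test
  const int64_t block = 1879048192;  // each of the two block_shape entries
  const int64_t block_product = block * block;  // ~3.53e18, still fits
  const bool final_multiply_overflows =
      batch > std::numeric_limits<int64_t>::max() / block_product;
  std::cout << block_product << " fits; batch multiply overflows: "
            << final_multiply_overflows << "\n";  // prints "... 1"
  return 0;
}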
1 change: 1 addition & 0 deletions tensorflow/compiler/tf2xla/kernels/BUILD
@@ -211,6 +211,7 @@ tf_kernel_library(
"//tensorflow/core/kernels:stateful_random_ops_header",
"//tensorflow/core/kernels:stateless_random_ops_v2_header",
"//tensorflow/core/tpu:tpu_defs",
"//tensorflow/core/util:overflow",
"//tensorflow/stream_executor/lib",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/container:flat_hash_map",
20 changes: 17 additions & 3 deletions tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/core/util/overflow.h"

namespace tensorflow {
namespace {
@@ -60,10 +61,14 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
int64_t pad_end = paddings.Get<int64_t>({i, 1});
OP_REQUIRES(ctx, pad_start >= 0 && pad_end >= 0,
errors::InvalidArgument("Paddings must be non-negative"));
OP_REQUIRES(ctx, block_shape[i] >= 1,
errors::InvalidArgument(
"All values in block_shape must be positive, got value, ",
block_shape[i], " at index ", i, "."));
dim->set_edge_padding_low(pad_start);
dim->set_edge_padding_high(pad_end);
padded_shape[1 + i] += pad_start + pad_end;
block_num_elems *= block_shape[i];
block_num_elems = MultiplyWithoutOverflow(block_num_elems, block_shape[i]);
}
// Don't pad the remainder dimensions.
for (int i = 0; i < remainder_shape.size(); ++i) {
@@ -72,6 +77,16 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
OP_REQUIRES(ctx, block_num_elems > 0,
errors::InvalidArgument(
"The product of the block dimensions must be positive"));
const int64_t batch_size = input_shape[0];
const int64_t output_dim =
MultiplyWithoutOverflow(batch_size, block_num_elems);
if (output_dim < 0) {
OP_REQUIRES(
ctx, output_dim >= 0,
errors::InvalidArgument("Negative output dimension size caused by "
"overflow when multiplying ",
batch_size, " and ", block_num_elems));
}

xla::XlaOp padded =
xla::Pad(input, XlaHelpers::Zero(b, input_dtype), padding_config);
@@ -85,7 +100,6 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
// padded_shape[M] / block_shape[M-1],
// block_shape[M-1]] +
// remaining_shape
const int64_t batch_size = input_shape[0];
std::vector<int64_t> reshaped_padded_shape(input_rank + block_rank);
reshaped_padded_shape[0] = batch_size;
for (int i = 0; i < block_rank; ++i) {
@@ -134,7 +148,7 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
// Determine the length of the prefix of block dims that can be combined
// into the batch dimension due to having no padding and block_shape=1.
std::vector<int64_t> output_shape(input_rank);
output_shape[0] = batch_size * block_num_elems;
output_shape[0] = output_dim;
for (int i = 0; i < block_rank; ++i) {
output_shape[1 + i] = padded_shape[1 + i] / block_shape[i];
}
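For context, the guarded quantity in this kernel is the output batch dimension, batch_size times the product of block_shape; on a valid call each spatial dimension is divided by its block size. A hypothetical worked example, not taken from the commit: input shape [10, 20, 20] with block_shape [2, 2] and zero paddings gives output shape [40, 10, 10].

// Hypothetical worked example (not part of the commit) of the shape
// arithmetic the new guard protects, assuming zero paddings and spatial
// dimensions that divide evenly by their block sizes.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  const std::vector<int64_t> input_shape = {10, 20, 20};  // [batch, h, w]
  const std::vector<int64_t> block_shape = {2, 2};
  int64_t block_num_elems = 1;
  for (int64_t b : block_shape) block_num_elems *= b;
  // Output batch dim is batch * prod(block_shape); this is the product the
  // commit now computes with MultiplyWithoutOverflow.
  std::vector<int64_t> output_shape = {input_shape[0] * block_num_elems};
  for (size_t i = 0; i < block_shape.size(); ++i) {
    output_shape.push_back(input_shape[1 + i] / block_shape[i]);
  }
  for (int64_t d : output_shape) std::cout << d << " ";  // prints "40 10 10"
  std::cout << "\n";
  return 0;
}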
1 change: 1 addition & 0 deletions tensorflow/core/framework/BUILD
@@ -891,6 +891,7 @@ cc_library(
"//tensorflow/core/lib/strings:scanner",
"//tensorflow/core/lib/strings:str_util",
"//tensorflow/core/platform:macros",
"//tensorflow/core/util:overflow",
"@com_google_absl//absl/memory",
],
)
3 changes: 2 additions & 1 deletion tensorflow/core/framework/shape_inference.cc
@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/util/overflow.h"

namespace tensorflow {
namespace shape_inference {
@@ -1111,7 +1112,7 @@ Status InferenceContext::Multiply(DimensionHandle first,
*out = UnknownDim();
} else {
// Invariant: Both values are known and greater than 1.
const int64_t product = first_value * second_value;
const int64_t product = MultiplyWithoutOverflow(first_value, second_value);
if (product < 0) {
return errors::InvalidArgument(
"Negative dimension size caused by overflow when multiplying ",
6 changes: 2 additions & 4 deletions tensorflow/core/kernels/BUILD
@@ -29,6 +29,7 @@ load(
load(
"//third_party/mkl:build_defs.bzl",
"if_mkl",
"mkl_deps",
)

# buildifier: disable=same-origin-load
@@ -61,10 +62,6 @@ load(
"//tensorflow/core/platform:build_config_root.bzl",
"tf_cuda_tests_tags",
)
load(
"//third_party/mkl:build_defs.bzl",
"mkl_deps",
)
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
load(
"@local_config_rocm//rocm:build_defs.bzl",
@@ -4569,6 +4566,7 @@ tf_kernel_library(
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core/framework:bounds_check",
"//tensorflow/core/util:overflow",
"//third_party/eigen3",
],
)
22 changes: 17 additions & 5 deletions tensorflow/core/kernels/spacetobatch_op.cc
@@ -21,8 +21,6 @@ limitations under the License.
#include <string>
#include <utility>

#include "tensorflow/core/kernels/spacetobatch_functor.h"

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -31,8 +29,10 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/spacetobatch_functor.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/overflow.h"

namespace tensorflow {

@@ -99,7 +99,13 @@ Status SpaceToBatchOpCompute(OpKernelContext* context,
// Compute the product of the block_shape values.
int64_t block_shape_product = 1;
for (int block_dim = 0; block_dim < block_dims; ++block_dim) {
block_shape_product *= block_shape[block_dim];
if (block_shape[block_dim] < 1) {
return errors::InvalidArgument(
"All values in block_shape must be positive, got value, ",
block_shape[block_dim], " at index ", block_dim, ".");
}
block_shape_product =
MultiplyWithoutOverflow(block_shape_product, block_shape[block_dim]);
}
if (block_shape_product <= 0) {
return errors::InvalidArgument(
@@ -131,8 +137,14 @@
// The actual output shape exposed to callers.
TensorShape external_output_shape;

external_output_shape.AddDim(orig_input_tensor.dim_size(0) *
block_shape_product);
const int64_t output_shape = MultiplyWithoutOverflow(
orig_input_tensor.dim_size(0), block_shape_product);
if (output_shape < 0) {
return errors::InvalidArgument(
"Negative output dimension size caused by overflow when multiplying ",
orig_input_tensor.dim_size(0), " and ", block_shape_product);
}
external_output_shape.AddDim(output_shape);

int64_t input_batch_size = orig_input_tensor.dim_size(0);
for (int block_dim = 0; block_dim < removed_prefix_block_dims; ++block_dim) {
3 changes: 3 additions & 0 deletions tensorflow/core/util/BUILD
@@ -533,6 +533,9 @@ tf_cuda_library(
cc_library(
name = "overflow",
hdrs = ["overflow.h"],
visibility = [
"//tensorflow:internal",
],
deps = [
"//tensorflow/core/platform:logging",
"//tensorflow/core/platform:macros",
23 changes: 23 additions & 0 deletions tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py
@@ -16,7 +16,9 @@

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
@@ -516,6 +518,27 @@ def testUnknown(self):
dtypes.float32, shape=(3, 2, 3, 2)), [2, 3], [[1, 1], [0, 0]])
self.assertEqual([3 * 2 * 3, 2, 1, 2], t.get_shape().as_list())

@test_util.run_in_graph_and_eager_modes
def testInvalidBlockShape(self):
tf_in = constant_op.constant(
-3.5e+35, shape=[10, 20, 20], dtype=dtypes.float32)
block_shape = constant_op.constant(-10, shape=[2], dtype=dtypes.int64)
paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"block_shape must be positive"):
array_ops.space_to_batch_nd(tf_in, block_shape, paddings)

@test_util.run_in_graph_and_eager_modes
def testOutputSizeOutOfBounds(self):
tf_in = constant_op.constant(
-3.5e+35, shape=[10, 19, 22], dtype=dtypes.float32)
block_shape = constant_op.constant(
1879048192, shape=[2], dtype=dtypes.int64)
paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"Negative.* dimension size caused by overflow"):
array_ops.space_to_batch_nd(tf_in, block_shape, paddings)


class SpaceToBatchGradientTest(test.TestCase, PythonOpImpl):

