Fix undefined behavior in QuantizedConv2D
Added more input validation and tests.  Prior to this change, we could
dereference null pointers when attempting to access the 0th element of
0-sized inputs, leading to security vulnerabilities.

Also modified `quantized_conv_ops_test.cc` for consistency: the CPU kernel
previously did technically accept tensors of rank greater than 0 for the
min/max values, but the XLA kernels do not.

PiperOrigin-RevId: 445518507
cantonios authored and tensorflower-gardener committed Apr 29, 2022
1 parent ec09dab commit 0f0b080
Showing 3 changed files with 107 additions and 26 deletions.
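As context for the diff below: after this change, the min/max range inputs to `QuantizedConv2D` must be scalars (rank-0 tensors), matching what the XLA kernels already require. The following is a minimal sketch of the calling convention, modeled on the updated Python test in this commit; the shapes and range values are illustrative only, and it assumes a TensorFlow build that includes this fix.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops

# Illustrative quint8 input and filter (shapes chosen arbitrarily, as in the test).
tin = math_ops.cast(
    constant_op.constant(1, shape=[1, 2, 3, 3]), dtype=dtypes.quint8)
tfilter = math_ops.cast(
    constant_op.constant(1, shape=[1, 2, 3, 3]), dtype=dtypes.quint8)

# The quantization ranges must now be rank-0 (scalar) float tensors.
min_input = constant_op.constant(0.0, shape=[], dtype=dtypes.float32)
max_input = constant_op.constant(255.0, shape=[], dtype=dtypes.float32)
min_filter = constant_op.constant(0.0, shape=[], dtype=dtypes.float32)
max_filter = constant_op.constant(255.0, shape=[], dtype=dtypes.float32)

out = nn_ops.quantized_conv2d(
    tin, tfilter,
    out_type=dtypes.qint32,
    strides=[1, 1, 1, 1],
    padding="SAME",
    min_input=min_input, max_input=max_input,
    min_filter=min_filter, max_filter=max_filter)

# A shape-[1] range tensor is now rejected up front instead of risking an
# out-of-bounds read of element 0.
try:
    nn_ops.quantized_conv2d(
        tin, tfilter, out_type=dtypes.qint32,
        strides=[1, 1, 1, 1], padding="SAME",
        min_input=constant_op.constant(0.0, shape=[1], dtype=dtypes.float32),
        max_input=max_input, min_filter=min_filter, max_filter=max_filter)
except (ValueError, errors.InvalidArgumentError) as e:
    print("rejected:", e)  # e.g. "min_input must be rank 0 but is rank 1"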
26 changes: 20 additions & 6 deletions tensorflow/core/kernels/quantized_conv_ops.cc
@@ -18,8 +18,6 @@ limitations under the License.
#include <algorithm>
#include <vector>

#include "tensorflow/core/platform/errors.h"

#define EIGEN_USE_THREADS

#define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
@@ -32,6 +30,7 @@ limitations under the License.
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/kernels/reference_gemm.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/padding.h"

namespace tensorflow {
@@ -499,11 +498,26 @@ class QuantizedConv2DOp : public OpKernel {

// For 2D convolution, there should be 4 dimensions.
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
errors::InvalidArgument("input must be rank 4 but is rank ",
input.shape().dims()));
OP_REQUIRES(context, filter.dims() == 4,
errors::InvalidArgument("filter must be 4-dimensional: ",
filter.shape().DebugString()));
errors::InvalidArgument("filter must be rank 4 but is rank ",
filter.shape().dims()));

OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(2).shape()),
errors::InvalidArgument("min_input must be rank 0 but is rank ",
context->input(2).shape().dims()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(context->input(3).shape()),
errors::InvalidArgument("max_input must be rank 0 but is rank ",
context->input(3).shape().dims()));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(context->input(4).shape()),
errors::InvalidArgument("min_filter must be rank 0 but is rank ",
context->input(4).shape().dims()));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(context->input(5).shape()),
errors::InvalidArgument("max_filter must be rank 0 but is rank ",
context->input(5).shape().dims()));

const float min_input = context->input(2).flat<float>()(0);
const float max_input = context->input(3).flat<float>()(0);
40 changes: 20 additions & 20 deletions tensorflow/core/kernels/quantized_conv_ops_test.cc
@@ -91,10 +91,10 @@ TEST_F(QuantizedConv2DTest, Small) {
image_quantized.flat<quint8>());
AddInputFromArray<quint8>(filter_quantized.shape(),
filter_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {image_min});
AddInputFromArray<float>(TensorShape({1}), {image_max});
AddInputFromArray<float>(TensorShape({1}), {filter_min});
AddInputFromArray<float>(TensorShape({1}), {filter_max});
AddInputFromArray<float>(TensorShape({}), {image_min});
AddInputFromArray<float>(TensorShape({}), {image_max});
AddInputFromArray<float>(TensorShape({}), {filter_min});
AddInputFromArray<float>(TensorShape({}), {filter_max});
TF_ASSERT_OK(RunOpKernel());

// We're sliding the 3x3 filter across the 3x4 image, with accesses outside
@@ -158,10 +158,10 @@ TEST_F(QuantizedConv2DTest, Small32Bit) {
AddInputFromArray<quint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{10, 40, 70, 20, 50, 80, 30, 60, 90});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});

TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
@@ -201,10 +201,10 @@ TEST_F(QuantizedConv2DTest, OddPadding) {
AddInputFromArray<quint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});

TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
@@ -244,10 +244,10 @@ TEST_F(QuantizedConv2DTest, OddPaddingBatch) {
AddInputFromArray<quint8>(
TensorShape({filter_size, filter_size, depth, filter_count}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});

TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width / stride;
@@ -302,10 +302,10 @@ TEST_F(QuantizedConv2DTest, SmallWithNoZero) {
image_quantized.flat<quint8>());
AddInputFromArray<quint8>(filter_quantized.shape(),
filter_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {image_min});
AddInputFromArray<float>(TensorShape({1}), {image_max});
AddInputFromArray<float>(TensorShape({1}), {filter_min});
AddInputFromArray<float>(TensorShape({1}), {filter_max});
AddInputFromArray<float>(TensorShape({}), {image_min});
AddInputFromArray<float>(TensorShape({}), {image_max});
AddInputFromArray<float>(TensorShape({}), {filter_min});
AddInputFromArray<float>(TensorShape({}), {filter_max});
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
67 changes: 67 additions & 0 deletions tensorflow/python/ops/quantized_conv_ops_test.py
@@ -18,6 +18,8 @@

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test

@@ -196,6 +198,71 @@ def testConv2D2x2FilterStride2Same(self):
padding="SAME",
expected=expected_output)

def _testBadInputSize(self,
tin=None,
tfilter=None,
min_input=None,
max_input=None,
min_filter=None,
max_filter=None,
error_regex=""):
strides = [1, 1, 1, 1]
padding = "SAME"
if tin is None:
tin = math_ops.cast(
constant_op.constant(1, shape=[1, 2, 3, 3]), dtype=dtypes.quint8)

if tfilter is None:
tfilter = math_ops.cast(
constant_op.constant(1, shape=[1, 2, 3, 3]), dtype=dtypes.quint8)

if min_input is None:
min_input = constant_op.constant(0, shape=[], dtype=dtypes.float32)

if max_input is None:
max_input = constant_op.constant(0, shape=[], dtype=dtypes.float32)

if min_filter is None:
min_filter = constant_op.constant(0, shape=[], dtype=dtypes.float32)

if max_filter is None:
max_filter = constant_op.constant(0, shape=[], dtype=dtypes.float32)

with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
error_regex):
self.evaluate(
nn_ops.quantized_conv2d(
tin,
tfilter,
out_type=dtypes.qint32,
strides=strides,
padding=padding,
min_input=min_input,
max_input=max_input,
min_filter=min_filter,
max_filter=max_filter))

def testBadInputSizes(self):
self._testBadInputSize(
tin=math_ops.cast(
constant_op.constant(1, shape=[1, 2]), dtype=dtypes.quint8),
error_regex="must be rank 4")
self._testBadInputSize(
tfilter=math_ops.cast(
constant_op.constant(1, shape=[1, 2]), dtype=dtypes.quint8),
error_regex="must be rank 4")
self._testBadInputSize(
min_input=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
error_regex="must be rank 0")
self._testBadInputSize(
max_input=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
error_regex="must be rank 0")
self._testBadInputSize(
min_filter=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
error_regex="must be rank 0")
self._testBadInputSize(
max_filter=constant_op.constant(0, shape=[1], dtype=dtypes.float32),
error_regex="must be rank 0")

if __name__ == "__main__":
test.main()
