Fixing more silent 64->32 conversions with explicit warnings (and/or handling 64).
Change: 119759411
1 parent 81e345d · commit 227e7db4b0d3d9ff8325b1f0f3a2ee1f91c8eb0c · @dave-andersen committed with tensorflower-gardener on Apr 13, 2016
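The hunks below repeat one pattern: read a potentially 64-bit dimension or element count, verify with FastBoundsCheck that it fits in int32, and only then cast, failing the op with errors::InvalidArgument otherwise. A minimal standalone sketch of that pattern follows; FastBoundsCheck here mirrors the behavior of the helper in tensorflow/core/kernels/bounds_check.h, while NarrowToInt32 is an illustrative wrapper that is not part of this commit (the real kernels raise the error via OP_REQUIRES instead of returning bool):

// Standalone sketch of the guard-then-narrow pattern applied throughout this commit.
#include <cstdint>
#include <iostream>
#include <limits>

bool FastBoundsCheck(int64_t index, int64_t limit) {
  // One unsigned comparison rejects both negative and too-large values.
  return static_cast<uint64_t>(index) < static_cast<uint64_t>(limit);
}

bool NarrowToInt32(int64_t dim_size, int32_t* out) {
  if (!FastBoundsCheck(dim_size, std::numeric_limits<int32_t>::max())) {
    return false;  // Caller reports "dimension too large for int32" instead of truncating.
  }
  *out = static_cast<int32_t>(dim_size);
  return true;
}

int main() {
  int32_t narrowed = 0;
  std::cout << NarrowToInt32(12345, &narrowed) << " " << narrowed << "\n";  // 1 12345
  std::cout << NarrowToInt32(int64_t{1} << 40, &narrowed) << "\n";          // 0
}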
@@ -46,8 +46,11 @@ limitations under the License.
// Call "m" for all number types that support the comparison operations "<" and
// ">".
+#define TF_CALL_INTEGRAL_TYPES(m) m(int64) m(int32) m(uint8) m(int16) m(int8)
+
#define TF_CALL_REAL_NUMBER_TYPES(m) \
- m(float) m(double) m(int64) m(int32) m(uint8) m(int16) m(int8)
+ TF_CALL_INTEGRAL_TYPES(m) \
+ m(float) m(double)
#define TF_CALL_REAL_NUMBER_TYPES_NO_INT32(m) \
m(float) m(double) m(int64) m(uint8) m(int16) m(int8)
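For context on how this new macro is consumed: TF_CALL_INTEGRAL_TYPES is an X-macro that expands its argument once per integral type, which is what lets a later hunk in this commit replace TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNELS) with TF_CALL_INTEGRAL_TYPES(REGISTER_KERNELS) and register one kernel per integer type. A self-contained sketch of the expansion, using standard fixed-width names instead of TensorFlow's int64/int32 typedefs and an illustrative PRINT_TYPE callback:

// X-macro expansion demo: TF_CALL_INTEGRAL_TYPES(PRINT_TYPE) becomes one
// PRINT_TYPE(...) statement per type, just as TF_CALL_INTEGRAL_TYPES(REGISTER_KERNELS)
// becomes one kernel registration per type.
#include <cstdint>
#include <iostream>

#define TF_CALL_INTEGRAL_TYPES(m) \
  m(int64_t) m(int32_t) m(uint8_t) m(int16_t) m(int8_t)

#define PRINT_TYPE(type) std::cout << sizeof(type) << "-byte " << #type << "\n";

int main() {
  TF_CALL_INTEGRAL_TYPES(PRINT_TYPE)
}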
@@ -672,6 +672,7 @@ tf_kernel_libraries(
"sample_distorted_bounding_box_op",
],
deps = [
+ ":bounds_check",
":eigen_helpers",
":image_resizer_state",
"//tensorflow/core:framework",
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/jpeg/jpeg_mem.h"
#include "tensorflow/core/platform/logging.h"
@@ -81,12 +82,21 @@ class EncodeJpegOp : public OpKernel {
errors::InvalidArgument("image must be 3-dimensional",
image.shape().DebugString()));
+ OP_REQUIRES(context, FastBoundsCheck(image.NumElements(),
+ std::numeric_limits<int32>::max()),
+ errors::InvalidArgument(
+ "Cannot encode images with >= max int32 elements"));
+
+ const int32 dim_size0 = static_cast<int32>(image.dim_size(0));
+ const int32 dim_size1 = static_cast<int32>(image.dim_size(1));
+ const int32 dim_size2 = static_cast<int32>(image.dim_size(2));
+
// Autodetect format if desired, otherwise make sure format and
// image channels are consistent.
int channels;
jpeg::CompressFlags adjusted_flags = flags_;
if (flags_.format == 0) {
- channels = image.dim_size(2);
+ channels = dim_size2;
if (channels == 1) {
adjusted_flags.format = jpeg::FORMAT_GRAYSCALE;
} else if (channels == 3) {
@@ -102,7 +112,7 @@ class EncodeJpegOp : public OpKernel {
} else { // RGB
channels = 3;
}
- OP_REQUIRES(context, channels == image.dim_size(2),
+ OP_REQUIRES(context, channels == dim_size2,
errors::InvalidArgument("format ", format_, " expects ",
channels, " channels, got ",
image.shape().DebugString()));
@@ -113,9 +123,8 @@ class EncodeJpegOp : public OpKernel {
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({}), &output));
OP_REQUIRES(context,
- jpeg::Compress(image.flat<uint8>().data(), image.dim_size(1),
- image.dim_size(0), adjusted_flags,
- &output->scalar<string>()()),
+ jpeg::Compress(image.flat<uint8>().data(), dim_size1, dim_size0,
+ adjusted_flags, &output->scalar<string>()()),
errors::Internal("JPEG encoding failed"));
}
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/png/png_io.h"
#include "tensorflow/core/platform/logging.h"
@@ -53,7 +54,20 @@ class EncodePngOp : public OpKernel {
OP_REQUIRES(context, image.dims() == 3,
errors::InvalidArgument("image must be 3-dimensional",
image.shape().DebugString()));
- const int64 channels = image.dim_size(2);
+ OP_REQUIRES(
+ context,
+ FastBoundsCheck(image.NumElements(), std::numeric_limits<int32>::max()),
+ errors::InvalidArgument("image cannot have >= int32 max elements"));
+ const int32 height = static_cast<int32>(image.dim_size(0));
+ const int32 width = static_cast<int32>(image.dim_size(1));
+ const int32 channels = static_cast<int32>(image.dim_size(2));
+
+ // In some cases, we pass width*channels*2 to png.
+ const int32 max_row_width = std::numeric_limits<int32>::max() / 2;
+
+ OP_REQUIRES(context, FastBoundsCheck(width * channels, max_row_width),
+ errors::InvalidArgument("image too wide to encode"));
+
OP_REQUIRES(context, channels >= 1 && channels <= 4,
errors::InvalidArgument(
"image must have 1, 2, 3, or 4 channels, got ", channels));
@@ -63,20 +77,19 @@ class EncodePngOp : public OpKernel {
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({}), &output));
if (desired_channel_bits_ == 8) {
- OP_REQUIRES(context, png::WriteImageToBuffer(
- image.flat<uint8>().data(), image.dim_size(1),
- image.dim_size(0), image.dim_size(1) * channels,
- channels, desired_channel_bits_, compression_,
- &output->scalar<string>()(), nullptr),
+ OP_REQUIRES(context,
+ png::WriteImageToBuffer(image.flat<uint8>().data(), width,
+ height, width * channels, channels,
+ desired_channel_bits_, compression_,
+ &output->scalar<string>()(), nullptr),
errors::Internal("PNG encoding failed"));
} else {
- OP_REQUIRES(
- context,
- png::WriteImageToBuffer(
- image.flat<uint16>().data(), image.dim_size(1), image.dim_size(0),
- image.dim_size(1) * channels * 2, channels, desired_channel_bits_,
- compression_, &output->scalar<string>()(), nullptr),
- errors::Internal("PNG encoding failed"));
+ OP_REQUIRES(context,
+ png::WriteImageToBuffer(
+ image.flat<uint16>().data(), width, height,
+ width * channels * 2, channels, desired_channel_bits_,
+ compression_, &output->scalar<string>()(), nullptr),
+ errors::Internal("PNG encoding failed"));
}
}
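On why max_row_width is int32 max divided by 2: per the code comment in the hunk above, the 16-bit branch passes width * channels * 2 as the PNG row stride, so bounding width * channels below half of int32 max guarantees the doubled stride still fits in int32. A small arithmetic sketch of that reasoning (values are illustrative, not from the commit):

#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  const int64_t kInt32Max = std::numeric_limits<int32_t>::max();
  const int64_t max_row_width = kInt32Max / 2;  // 1073741823

  // A row that passes the check: doubling it still fits in int32.
  const int64_t ok_row = int64_t{20000000} * 4;   // width = 20M, channels = 4
  std::cout << (ok_row < max_row_width) << " "    // 1
            << (ok_row * 2 <= kInt32Max) << "\n"; // 1

  // A row that a naive "< int32 max" check would accept, but whose doubled
  // stride overflows int32 -- exactly the case the /2 bound rules out.
  const int64_t wide_row = int64_t{600000000} * 3;  // 1.8e9
  std::cout << (wide_row < kInt32Max) << " "        // 1: naive check passes
            << (wide_row * 2 <= kInt32Max) << "\n"; // 0: doubled stride overflows
}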
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/util/guarded_philox_random.h"
@@ -120,8 +121,8 @@ bool GenerateRandomCrop(int original_width, int original_height,
const float max_area =
max_relative_crop_area * original_width * original_height;
- int height = lrintf(sqrt(min_area / aspect_ratio));
- int max_height = lrintf(sqrt(max_area / aspect_ratio));
+ int height = static_cast<int>(lrintf(sqrt(min_area / aspect_ratio)));
+ int max_height = static_cast<int>(lrintf(sqrt(max_area / aspect_ratio)));
if (lrintf(max_height * aspect_ratio) > original_width) {
// We must find the smallest max_height satisfying
@@ -143,7 +144,7 @@ bool GenerateRandomCrop(int original_width, int original_height,
// [0, max_height - height].
height += random->Uniform(max_height - height + 1);
}
- int width = lrintf(height * aspect_ratio);
+ int width = static_cast<int>(lrintf(height * aspect_ratio));
DCHECK_LE(width, original_width);
// Let us not fail if rounding error causes the area to be
@@ -152,7 +153,7 @@ bool GenerateRandomCrop(int original_width, int original_height,
float area = static_cast<float>(width * height);
if (area < min_area) {
height += 1;
- width = lrintf(height * aspect_ratio);
+ width = static_cast<int>(lrintf(height * aspect_ratio));
area = width * height;
}
@@ -161,7 +162,7 @@ bool GenerateRandomCrop(int original_width, int original_height,
// Try first with a slightly smaller rectangle first.
if (area > max_area) {
height -= 1;
- width = lrintf(height * aspect_ratio);
+ width = static_cast<int>(lrintf(height * aspect_ratio));
area = width * height;
}
@@ -247,14 +248,20 @@ class SampleDistortedBoundingBoxOp : public OpKernel {
errors::InvalidArgument("image_size must be 1-dimensional",
image_size.shape().DebugString()));
OP_REQUIRES(context, image_size.dim_size(0) == 3,
- errors::InvalidArgument("image_size must be 3-dimensional",
+ errors::InvalidArgument("image_size must contain 3 elements",
image_size.shape().DebugString()));
// Note image_size_data(2) is the depth and unused.
- typename TTypes<T, 1>::ConstTensor image_size_data =
- image_size.tensor<T, 1>();
- const int32 height = image_size_data(0);
- const int32 width = image_size_data(1);
+ const uint64 height_raw = internal::SubtleMustCopy(image_size.flat<T>()(0));
+ const uint64 width_raw = internal::SubtleMustCopy(image_size.flat<T>()(1));
+ OP_REQUIRES(context,
+ FastBoundsCheck(height_raw, std::numeric_limits<int32>::max()),
+ errors::InvalidArgument("image height cannot be >= int32 max"));
+ OP_REQUIRES(context,
+ FastBoundsCheck(width_raw, std::numeric_limits<int32>::max()),
+ errors::InvalidArgument("image width cannot be >= int32 max"));
+ const int32 height = static_cast<int32>(height_raw);
+ const int32 width = static_cast<int32>(width_raw);
// Ensure that the supplied bounding boxes are sane and convert them to
// Rectangles.
@@ -398,7 +405,7 @@ class SampleDistortedBoundingBoxOp : public OpKernel {
.TypeConstraint<type>("T"), \
SampleDistortedBoundingBoxOp<type>)
-TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNELS);
+TF_CALL_INTEGRAL_TYPES(REGISTER_KERNELS);
#undef REGISTER_KERNELS
} // namespace tensorflow
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/kernels/bounds_check.h"
namespace tensorflow {
@@ -35,7 +36,15 @@ class ShapeOp : public OpKernel {
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({rank}), &out));
auto vec = out->vec<int32>();
- for (int i = 0; i < rank; ++i) vec(i) = inp.dim_size(i);
+ // TODO(dga): support int64. b/28119922.
+ for (int i = 0; i < rank; ++i) {
+ int64 dim_size = inp.dim_size(i);
+ OP_REQUIRES(
+ ctx, FastBoundsCheck(dim_size, std::numeric_limits<int32>::max()),
+ errors::InvalidArgument("Shape does not support tensors > int32max",
+ " but dim ", i, " is ", dim_size));
+ vec(i) = static_cast<int32>(dim_size);
+ }
}
bool IsExpensive() override { return false; }
@@ -75,7 +84,17 @@ class ShapeNOp : public OpKernel {
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(i, {dims}, &out));
auto vec = out->vec<int32>();
- for (int j = 0; j < dims; ++j) vec(j) = shape.dim_size(j);
+
+ // TODO(dga): support int64. b/28119922.
+ for (int j = 0; j < dims; ++j) {
+ int64 dim_size = shape.dim_size(j);
+ OP_REQUIRES(
+ ctx, FastBoundsCheck(dim_size, std::numeric_limits<int32>::max()),
+ errors::InvalidArgument("Shape does not support tensors > int32max",
+ " but shape ", i, " dim ", j, " is ",
+ dim_size));
+ vec(j) = static_cast<int32>(dim_size);
+ }
}
}
@@ -159,8 +178,11 @@ class SizeOp : public OpKernel {
const int64 size = inp.NumElements();
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out));
- // TODO(josh11b): switch output to int64?
- out->scalar<int32>()() = size;
+ OP_REQUIRES(ctx, FastBoundsCheck(size, std::numeric_limits<int32>::max()),
+ errors::InvalidArgument("Size does not work for tensors > "
+ "int32 max."));
+ // TODO(josh11b): switch output to int64? (b/28119922)
+ out->scalar<int32>()() = static_cast<int32>(size);
}
bool IsExpensive() override { return false; }
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/split_lib.h"
#include "tensorflow/core/lib/core/status.h"
@@ -89,18 +90,24 @@ class SplitOpBase : public OpKernel {
}
}
- std::tuple<int32, int32, int32> SetDims(const TensorShape& input_shape,
- int32 split_dim) const {
+ template <typename IndexType>
+ std::tuple<IndexType, IndexType, IndexType> SetDims(
+ const TensorShape& input_shape, int32 split_dim) const {
+ static_assert(std::is_integral<IndexType>::value,
+ "IndexType must be an integer type");
int32 prefix_dim_size = 1;
for (int i = 0; i < split_dim; ++i) {
prefix_dim_size *= input_shape.dim_size(i);
}
- int32 split_dim_size = input_shape.dim_size(split_dim);
+ // Caller must ensure that dim_size and suffix_dim_size are <
+ // std::numeric_limits<IndexType>::max()
+ IndexType split_dim_size =
+ static_cast<IndexType>(input_shape.dim_size(split_dim));
- int32 suffix_dim_size = 1;
+ IndexType suffix_dim_size = 1;
for (int i = split_dim + 1; i < input_shape.dims(); ++i) {
- suffix_dim_size *= input_shape.dim_size(i);
+ suffix_dim_size *= static_cast<IndexType>(input_shape.dim_size(i));
}
return std::make_tuple(prefix_dim_size, split_dim_size, suffix_dim_size);
}
@@ -123,16 +130,23 @@ class SplitOpCPU : public SplitOpBase<CPUDevice, T> {
const Tensor& input = context->input(1);
const TensorShape& input_shape = input.shape();
- int32 prefix_dim_size;
- int32 split_dim_size;
- int32 suffix_dim_size;
+ // Android also uses int32 indexing, so check here also.
+ OP_REQUIRES(
+ context, FastBoundsCheck(input.NumElements(),
+ std::numeric_limits<Eigen::DenseIndex>::max()),
+ errors::InvalidArgument("Split requires input size < ",
+ std::numeric_limits<Eigen::DenseIndex>::max()));
+
+ Eigen::DenseIndex prefix_dim_size;
+ Eigen::DenseIndex split_dim_size;
+ Eigen::DenseIndex suffix_dim_size;
std::tie(prefix_dim_size, split_dim_size, suffix_dim_size) =
- Base::SetDims(input_shape, split_dim);
+ Base::template SetDims<Eigen::DenseIndex>(input_shape, split_dim);
auto input_reshaped =
input.shaped<T, 3>({prefix_dim_size, split_dim_size, suffix_dim_size});
- const int32 split_dim_output_size = split_dim_size / num_split;
+ const int64 split_dim_output_size = split_dim_size / num_split;
TensorShape output_shape(input_shape);
output_shape.set_dim(split_dim, split_dim_output_size);
@@ -190,12 +204,16 @@ class SplitOpGPU : public SplitOpBase<GPUDevice, T> {
const int32 num_split = Base::num_outputs();
const Tensor& input = context->input(1);
const TensorShape& input_shape = input.shape();
+ OP_REQUIRES(context, FastBoundsCheck(input.NumElements(),
+ std::numeric_limits<int32>::max()),
+ errors::InvalidArgument("Split on GPU requires input size "
+ "< max int32"));
int32 prefix_dim_size;
int32 split_dim_size;
int32 suffix_dim_size;
std::tie(prefix_dim_size, split_dim_size, suffix_dim_size) =
- Base::SetDims(input_shape, split_dim);
+ Base::template SetDims<int32>(input_shape, split_dim);
const int32 split_dim_output_size = split_dim_size / num_split;
TensorShape output_shape(input_shape);