Validate arguments to QuantizedReshape.
Ensure that validations from `Reshape` also terminate `QuantizedReshape` on failure.

PiperOrigin-RevId: 369775421
Change-Id: If8c5342267aceea65b7cb83a4b183304886f1ce8
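The first part of the change is a shape check: the old kernel read element 0 of the `input_min` and `input_max` tensors (inputs 2 and 3) without confirming that either tensor actually holds exactly one value. A minimal standalone sketch of the shape test the kernel now applies, written in plain C++ over a bare list of dimension sizes rather than TensorFlow's `TensorShapeUtils`, is:

#include <cstdint>
#include <iostream>
#include <vector>

// Standalone sketch (not the TensorFlow API): the "scalar or 1-element
// vector" test the kernel now applies to input_min and input_max before
// reading their first element.
bool IsScalarOrLengthOneVector(const std::vector<int64_t>& dims) {
  const bool is_scalar = dims.empty();                        // rank 0
  const bool is_len1_vec = dims.size() == 1 && dims[0] == 1;  // shape [1]
  return is_scalar || is_len1_vec;
}

int main() {
  std::cout << IsScalarOrLengthOneVector({}) << "\n";      // 1: scalar, accepted
  std::cout << IsScalarOrLengthOneVector({1}) << "\n";     // 1: shape [1], accepted
  std::cout << IsScalarOrLengthOneVector({0}) << "\n";     // 0: empty vector, rejected
  std::cout << IsScalarOrLengthOneVector({2, 3}) << "\n";  // 0: rank-2 tensor, rejected
}

With the corresponding check in the kernel, shapes such as [0] or [2, 3] now fail with `InvalidArgument` instead of the kernel unconditionally reading the first element.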
mihaimaruseac authored and tensorflower-gardener committed Apr 22, 2021
1 parent 2ec2ce4 commit a324ac8
Showing 1 changed file with 23 additions and 2 deletions.
25 changes: 23 additions & 2 deletions tensorflow/core/kernels/quantized_reshape_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
 
 #include "tensorflow/core/framework/op_kernel.h"
 #include "tensorflow/core/framework/register_types.h"
+#include "tensorflow/core/framework/tensor_shape.h"
 #include "tensorflow/core/framework/tensor_types.h"
 #include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/kernels/reshape_op.h"
@@ -30,9 +31,29 @@ class QuantizedReshapeOp : public ReshapeOp {
   void Compute(OpKernelContext* ctx) override {
     // This call processes inputs 1 and 2 to write output 0.
     ReshapeOp::Compute(ctx);
+    if (!ctx->status().ok()) {
+      return;
+    }
+
+    const auto& input_min_float_tensor = ctx->input(2);
+    const auto& input_min_float_shape = input_min_float_tensor.shape();
+    OP_REQUIRES(ctx,
+                TensorShapeUtils::IsScalar(input_min_float_shape) ||
+                    (TensorShapeUtils::IsVector(input_min_float_shape) &&
+                     (input_min_float_shape.dim_size(0) == 1)),
+                errors::InvalidArgument(
+                    "input_min must be a scalar or a vector of 1 element"));
+    const float input_min_float = input_min_float_tensor.flat<float>()(0);
+    const auto& input_max_float_tensor = ctx->input(3);
+    const auto& input_max_float_shape = input_max_float_tensor.shape();
+    OP_REQUIRES(ctx,
+                TensorShapeUtils::IsScalar(input_max_float_shape) ||
+                    (TensorShapeUtils::IsVector(input_max_float_shape) &&
+                     (input_max_float_shape.dim_size(0) == 1)),
+                errors::InvalidArgument(
+                    "input_max must be a scalar or a vector of 1 element"));
+    const float input_max_float = input_max_float_tensor.flat<float>()(0);
 
-    const float input_min_float = ctx->input(2).flat<float>()(0);
-    const float input_max_float = ctx->input(3).flat<float>()(0);
     Tensor* output_min = nullptr;
     OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min));
     output_min->flat<float>()(0) = input_min_float;
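The early-return guard at the top of the hunk relies on `ReshapeOp::Compute` reporting failure through the kernel context's status rather than through a return value; only when that status is OK does the quantized variant go on to validate and copy the range inputs. A minimal standalone sketch of that propagate-then-return pattern, using plain C++ stand-ins rather than TensorFlow types, is:

#include <iostream>
#include <optional>
#include <string>

// Minimal stand-in for an op kernel context that records the first error.
struct Context {
  std::optional<std::string> error;  // empty means OK
  bool ok() const { return !error.has_value(); }
};

// Stand-in for ReshapeOp::Compute: records an error instead of returning one.
void BaseCompute(Context* ctx, bool fail) {
  if (fail) ctx->error = "invalid reshape";
}

// Mirrors the new QuantizedReshapeOp::Compute structure: run the base step,
// then bail out before touching the range inputs if it failed.
void QuantizedCompute(Context* ctx, bool fail_base) {
  BaseCompute(ctx, fail_base);
  if (!ctx->ok()) {
    return;  // analogous to `if (!ctx->status().ok()) return;`
  }
  std::cout << "validating and copying input_min/input_max\n";
}

int main() {
  Context ok_ctx;
  QuantizedCompute(&ok_ctx, /*fail_base=*/false);  // reaches the range handling
  Context bad_ctx;
  QuantizedCompute(&bad_ctx, /*fail_base=*/true);  // returns early
  std::cout << (bad_ctx.ok() ? "ok" : *bad_ctx.error) << "\n";
}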

0 comments on commit a324ac8
