Skip to content
Permalink
Browse files Browse the repository at this point in the history
[tflite]: Insert nullptr checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert `nullptr` checks on all usages.

We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor`, but only where no existing check shows that `nullptr` is acceptable (that is, we add the check only when the returned tensor is dereferenced as if it were guaranteed to be non-`nullptr`).

PiperOrigin-RevId: 332517854
Change-Id: Ic27221dd1f0fbe302f311c2fe5a846ed8ff02016
  • Loading branch information
mihaimaruseac authored and tensorflower-gardener committed Sep 18, 2020
1 parent ec98fee commit e11f555
Show file tree
Hide file tree
Showing 19 changed files with 515 additions and 196 deletions.
48 changes: 32 additions & 16 deletions tensorflow/lite/delegates/delegate_test.cc
Expand Up @@ -42,9 +42,12 @@ TfLiteRegistration AddOpRegistration() {

reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// Set output size to input size
const TfLiteTensor* input1 = GetInput(context, node, 0);
const TfLiteTensor* input2 = GetInput(context, node, 1);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));

TF_LITE_ENSURE_EQ(context, input1->dims->size, input2->dims->size);
for (int i = 0; i < input1->dims->size; ++i) {
Expand All @@ -58,13 +61,16 @@ TfLiteRegistration AddOpRegistration() {

reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
// Copy input data to output data.
const TfLiteTensor* a0 = GetInput(context, node, 0);
const TfLiteTensor* a0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
TF_LITE_ENSURE(context, a0);
TF_LITE_ENSURE(context, a0->data.f);
const TfLiteTensor* a1 = GetInput(context, node, 1);
const TfLiteTensor* a1;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &a1));
TF_LITE_ENSURE(context, a1);
TF_LITE_ENSURE(context, a1->data.f);
TfLiteTensor* out = GetOutput(context, node, 0);
TfLiteTensor* out;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out));
TF_LITE_ENSURE(context, out);
TF_LITE_ENSURE(context, out->data.f);
int num = a0->dims->data[0];
Expand Down Expand Up @@ -267,7 +273,8 @@ class TestDelegate : public ::testing::Test {
a0 = GetInput(context, node, 0);
a1 = a0;
}
TfLiteTensor* out = GetOutput(context, node, 0);
TfLiteTensor* out;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out));
int num = 1;
for (int i = 0; i < a0->dims->size; ++i) {
num *= a0->dims->data[i];
Expand All @@ -289,8 +296,10 @@ class TestDelegate : public ::testing::Test {
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// Shapes should already by propagated by the runtime, just need to
// check.
const TfLiteTensor* input1 = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input1));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const int input_dims_size = input1->dims->size;
TF_LITE_ENSURE(context, output->dims->size == input_dims_size);
for (int i = 0; i < input_dims_size; ++i) {
Expand All @@ -315,7 +324,8 @@ class TestDelegate : public ::testing::Test {
input1 = GetInput(context, node, 0);
input2 = input1;
}
TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));

TF_LITE_ENSURE_STATUS(context->ResizeTensor(
context, output, TfLiteIntArrayCopy(input1->dims)));
Expand Down Expand Up @@ -1169,11 +1179,14 @@ class TestDelegateWithDynamicTensors : public ::testing::Test {

reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
// Output 0 is dynamic
TfLiteTensor* output0 = GetOutput(context, node, 0);
TfLiteTensor* output0;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output0));
SetTensorToDynamic(output0);
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &output1));
TF_LITE_ENSURE_STATUS(context->ResizeTensor(
context, output1, TfLiteIntArrayCopy(input->dims)));
return kTfLiteOk;
Expand All @@ -1193,11 +1206,14 @@ class TestDelegateWithDynamicTensors : public ::testing::Test {
// If tensors are resized, the runtime should propagate shapes
// automatically if correct flag is set. Ensure values are correct.
// Output 0 should be dynamic.
TfLiteTensor* output0 = GetOutput(context, node, 0);
TfLiteTensor* output0;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output0));
TF_LITE_ENSURE(context, IsDynamicTensor(output0));
// Output 1 has the same shape as input.
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output1 = GetOutput(context, node, 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &output1));
TF_LITE_ENSURE(context, input->dims->size == output1->dims->size);
TF_LITE_ENSURE(context, input->dims->data[0] == output1->dims->data[0]);
return kTfLiteOk;
Expand Down
8 changes: 7 additions & 1 deletion tensorflow/lite/delegates/gpu/common/model_builder.cc
Expand Up @@ -1166,6 +1166,9 @@ class MulOperationParser : public TFLiteOperationParser {
}
auto input0 = tflite::GetInput(context, tflite_node, 0);
auto input1 = tflite::GetInput(context, tflite_node, 1);
if (input0 == nullptr || input1 == nullptr) {
return absl::InvalidArgumentError("At least one input tensor is null");
}
if (input0->dims->size == input1->dims->size) {
// this code checks that at least one input of Mul not smaller in all
// dimensions. Sometimes Mul used for matrix-vector multiplication that we
Expand Down Expand Up @@ -1380,7 +1383,10 @@ class PadOperationParser : public TFLiteOperationParser {
RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
/*runtime_inputs=*/1, /*outputs=*/1));
RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
auto pad_tensor = tflite::GetInput(context, tflite_node, 1);
const TfLiteTensor* pad_tensor = tflite::GetInput(context, tflite_node, 1);
if (pad_tensor == nullptr) {
return absl::InvalidArgumentError("Padding tensor was null");
}
if (pad_tensor->dims->size != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid paddings tensor dimension: expected 2 dim, got ",
Expand Down
Expand Up @@ -328,7 +328,9 @@ bool IsConvolutionOpSupported(const TfLiteRegistration* registration,
const int kOutputShapeTensor = 0; // Only used for TransposeConv
const int kWeightTensor = 1;
const int kBiasTensor = 2; // Only used for non-TransposeConv
const TfLiteTensor* weights = GetInput(context, node, kWeightTensor);
const TfLiteTensor* weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kWeightTensor, &weights));
const int max_kernel_size = 16384;
if (!IsConstantTensor(weights)) {
return false;
Expand Down
Expand Up @@ -153,8 +153,10 @@ bool IsFullyConnectedOpSupported(const TfLiteRegistration* registration,
if (fc_params->weights_format != kTfLiteFullyConnectedWeightsFormatDefault) {
return false;
}
const TfLiteTensor* input = GetInput(context, node, kInput);
const TfLiteTensor* weights = GetInput(context, node, kWeights);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInput, &input));
const TfLiteTensor* weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kWeights, &weights));

if (!IsFloatType(input->type)) {
return false;
Expand All @@ -169,7 +171,8 @@ bool IsFullyConnectedOpSupported(const TfLiteRegistration* registration,
}

if (node->inputs->size > 2) {
const TfLiteTensor* bias = GetInput(context, node, kBias);
const TfLiteTensor* bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBias, &bias));
if (!IsFloatType(bias->type) || !IsConstantTensor(bias)) {
return false;
}
Expand Down
Expand Up @@ -97,7 +97,8 @@ OpBuilder* CreateMirrorPadOpBuilder(GraphBuilder* graph_builder) {
bool IsPadOpSupported(const TfLiteRegistration* registration,
const TfLiteNode* node, TfLiteContext* context) {
// padding is d x 2 tensor, where d is the dimension of input.
const TfLiteTensor* padding = GetInput(context, node, 1);
const TfLiteTensor* padding;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding));
if (!IsConstantTensor(padding)) {
TF_LITE_KERNEL_LOG(context,
"%s: Only constant padding is supported for PAD.",
Expand Down
Expand Up @@ -126,7 +126,8 @@ bool IsReshapeOpSupported(const TfLiteRegistration* registration,
}

const int kShapeTensor = 1;
const auto* shape = GetInput(context, node, kShapeTensor);
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShapeTensor, &shape));
if (shape->allocation_type != kTfLiteMmapRo) {
TF_LITE_KERNEL_LOG(context, "Reshape has non-const shape.");
return false;
Expand Down
49 changes: 34 additions & 15 deletions tensorflow/lite/experimental/kernels/ctc_beam_search_decoder.cc
Expand Up @@ -62,14 +62,17 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// The outputs should be top_paths * 3 + 1.
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1);

const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor);
const TfLiteTensor* inputs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &inputs));
TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3);
// TensorFlow only supports float.
TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32);
const int batch_size = SizeOfDimension(inputs, 1);

const TfLiteTensor* sequence_length =
GetInput(context, node, kSequenceLengthTensor);
const TfLiteTensor* sequence_length;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
&sequence_length));
TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1);
TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size);
// TensorFlow only supports int32.
Expand All @@ -78,17 +81,23 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Resize decoded outputs.
// Do not resize indices & values cause we don't know the values yet.
for (int i = 0; i < top_paths; ++i) {
TfLiteTensor* indices = GetOutput(context, node, i);
TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &indices));
SetTensorToDynamic(indices);
TfLiteTensor* values = GetOutput(context, node, i + top_paths);
TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, i + top_paths, &values));
SetTensorToDynamic(values);
TfLiteTensor* output_shape = GetOutput(context, node, i + 2 * top_paths);
TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i + 2 * top_paths,
&output_shape));
SetTensorToDynamic(output_shape);
}

// Resize log probability outputs.
TfLiteTensor* log_probability_output =
GetOutput(context, node, top_paths * 3);
TfLiteTensor* log_probability_output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, top_paths * 3,
&log_probability_output));
TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2);
log_probability_output_shape_array->data[0] = batch_size;
log_probability_output_shape_array->data[1] = top_paths;
Expand Down Expand Up @@ -127,13 +136,18 @@ TfLiteStatus StoreAllDecodedSequences(
const int32_t p_num = num_entries[p];

// Resize the decoded outputs.
TfLiteTensor* indices = GetOutput(context, node, p);
TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));

TfLiteTensor* values = GetOutput(context, node, p + top_paths);
TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, p + top_paths, &values));
TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));

TfLiteTensor* decoded_shape = GetOutput(context, node, p + 2 * top_paths);
TfLiteTensor* decoded_shape;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths,
&decoded_shape));
TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));

int32_t max_decoded = 0;
Expand Down Expand Up @@ -161,9 +175,12 @@ TfLiteStatus StoreAllDecodedSequences(
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* inputs = GetInput(context, node, kInputsTensor);
const TfLiteTensor* sequence_length =
GetInput(context, node, kSequenceLengthTensor);
const TfLiteTensor* inputs;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputsTensor, &inputs));
const TfLiteTensor* sequence_length;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
&sequence_length));
const CTCBeamSearchDecoderParams* option =
reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);

Expand Down Expand Up @@ -207,7 +224,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
std::vector<std::vector<std::vector<int>>> best_paths(batch_size);
std::vector<float> log_probs;

TfLiteTensor* log_probabilities = GetOutput(context, node, 3 * top_paths);
TfLiteTensor* log_probabilities;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, 3 * top_paths, &log_probabilities));
float* log_probabilities_output = GetTensorData<float>(log_probabilities);

// Assumption: the blank index is num_classes - 1
Expand Down

0 comments on commit e11f555

Please sign in to comment.