75 changes: 55 additions & 20 deletions tensorflow/lite/kernels/fully_connected.cc
Original file line number Diff line number Diff line change
Expand Up @@ -155,13 +155,18 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
: 2;
TF_LITE_ENSURE_EQ(context, node->outputs->size, expected_outputs_count);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kWeightsTensor, &filter));
const TfLiteTensor* bias =
(node->inputs->size == 3)
? GetOptionalInputTensor(context, node, kBiasTensor)
: nullptr;
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// Check proper datatype match among all Input Tensors
TF_LITE_ENSURE_STATUS(
Expand Down Expand Up @@ -214,7 +219,9 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
node->temporaries = TfLiteIntArrayCreate(5);
node->temporaries->data[0] = data->scratch_tensor_index;

TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/0,
&input_quantized));
input_quantized->type = filter->type;
input_quantized->allocation_type = kTfLiteArenaRw;

Expand All @@ -223,7 +230,9 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
input_quantized_size));

node->temporaries->data[1] = data->scratch_tensor_index + 1;
TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/1);
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;

Expand All @@ -236,7 +245,9 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
}

node->temporaries->data[2] = data->scratch_tensor_index + 2;
TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/2);
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/2, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
Expand All @@ -250,7 +261,9 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
}

node->temporaries->data[3] = data->scratch_tensor_index + 3;
TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/3);
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/3, &input_offsets));
input_offsets->type = kTfLiteInt32;
input_offsets->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) {
Expand All @@ -260,7 +273,9 @@ TfLiteStatus PrepareImpl(TfLiteContext* context, TfLiteNode* node) {
input_offsets_size));
}
node->temporaries->data[4] = data->scratch_tensor_index + 4;
TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/4);
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/4, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[1] = {num_units};
Expand Down Expand Up @@ -300,8 +315,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Check for supported activation types.
auto* params =
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kWeightsTensor, &filter));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const bool is_quantized =
((filter->type == kTfLiteUInt8) || (filter->type == kTfLiteInt8));
const bool is_hybrid = is_quantized && (input->type == kTfLiteFloat32);
Expand Down Expand Up @@ -484,11 +502,21 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
int32_t output_offset = output->params.zero_point;
// Only the Pie path supports quantized models and float inputs/outputs.
if (input->type == kTfLiteFloat32) {
TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/1);
TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/2);
TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/3);
TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/4);
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/0,
&input_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, /*index=*/1,
&scaling_factors));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/2, &accum_scratch));
TfLiteTensor* input_offsets;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/3, &input_offsets));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/4, &row_sums));
return EvalHybrid(context, node, params, data, input, filter, bias,
input_quantized, scaling_factors, accum_scratch, row_sums,
input_offsets, output);
Expand Down Expand Up @@ -693,13 +721,18 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* filter;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kWeightsTensor, &filter));
const TfLiteTensor* bias =
(node->inputs->size == 3)
? GetOptionalInputTensor(context, node, kBiasTensor)
: nullptr;
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (filter->type) {
case kTfLiteFloat32:
Expand All @@ -708,8 +741,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
case kTfLiteUInt8:
if (params->weights_format ==
kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8) {
TfLiteTensor* shuffled_input_workspace =
GetOutput(context, node, kShuffledInputWorkspaceTensor);
TfLiteTensor* shuffled_input_workspace;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kShuffledInputWorkspaceTensor,
&shuffled_input_workspace));
return EvalShuffledQuantized<kernel_type>(context, node, params, data,
input, filter, bias, output,
shuffled_input_workspace);
Expand Down
22 changes: 16 additions & 6 deletions tensorflow/lite/kernels/gather.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {

const auto* params =
reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* positions = GetInput(context, node, kInputPositions);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* positions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputPositions, &positions));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (positions->type) {
case kTfLiteInt64:
Expand Down Expand Up @@ -132,9 +137,14 @@ TfLiteStatus GatherStrings(TfLiteContext* context, const TfLiteTensor* input,
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const auto* params =
reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* positions = GetInput(context, node, kInputPositions);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* positions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputPositions, &positions));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (positions->type == kTfLiteInt32) {
switch (input->type) {
Expand Down
20 changes: 14 additions & 6 deletions tensorflow/lite/kernels/gather_nd.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,13 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* params = GetInput(context, node, kParams);
const TfLiteTensor* indices = GetInput(context, node, kIndices);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* params;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (params->type) {
case kTfLiteFloat32:
Expand Down Expand Up @@ -140,9 +144,13 @@ TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params,
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* params = GetInput(context, node, kParams);
const TfLiteTensor* indices = GetInput(context, node, kIndices);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* params;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (indices->type) {
case kTfLiteInt32:
Expand Down
31 changes: 21 additions & 10 deletions tensorflow/lite/kernels/hashtable_lookup.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ limitations under the License.
#include <cstring>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"

Expand All @@ -54,28 +55,33 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);

const TfLiteTensor* lookup = GetInput(context, node, 0);
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);

const TfLiteTensor* key = GetInput(context, node, 1);
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));
TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1);
TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32);

const TfLiteTensor* value = GetInput(context, node, 2);
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));
TF_LITE_ENSURE(context, NumDimensions(value) >= 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0),
SizeOfDimension(value, 0));
if (value->type == kTfLiteString) {
TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1);
}

TfLiteTensor* hits = GetOutput(context, node, 1);
TfLiteTensor* hits;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));
TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8);
TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1);
hitSize->data[0] = SizeOfDimension(lookup, 0);

TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_EQ(context, value->type, output->type);

TfLiteStatus status = kTfLiteOk;
Expand All @@ -94,11 +100,16 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteTensor* hits = GetOutput(context, node, 1);
const TfLiteTensor* lookup = GetInput(context, node, 0);
const TfLiteTensor* key = GetInput(context, node, 1);
const TfLiteTensor* value = GetInput(context, node, 2);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteTensor* hits;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));

const int num_rows = SizeOfDimension(value, 0);
const int row_bytes = value->bytes / num_rows;
Expand Down
25 changes: 17 additions & 8 deletions tensorflow/lite/kernels/if.cc
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
Expand Down Expand Up @@ -52,7 +53,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, node->inputs->size > 0);

// The first input is the condition.
const TfLiteTensor* cond = GetInput(context, node, 0);
const TfLiteTensor* cond;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &cond));
// Currently only bool is supported.
// TODO(ycling): Support other types since TensorFlow also support
// non-bool types as condition.
Expand Down Expand Up @@ -83,7 +85,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
for (int i = 0; i < num_inputs; ++i) {
// The first input of the node is the condition. The indices of the inputs
// passed to the subgraphs are offset by 1.
const TfLiteTensor* input = GetInput(context, node, i + 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i + 1, &input));
std::vector<int> dims(input->dims->data,
input->dims->data + input->dims->size);
subgraph->ResizeInputTensor(i, dims);
Expand Down Expand Up @@ -113,7 +116,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

for (int i = 0; i < num_outputs; ++i) {
TfLiteTensor* output = GetOutput(context, node, i);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
if (has_dynamic_output_tensors) {
SetTensorToDynamic(output);
} else {
Expand All @@ -133,7 +137,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);

const TfLiteTensor* cond = GetInput(context, node, 0);
const TfLiteTensor* cond;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &cond));
bool cond_value = cond->data.b[0];

Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
Expand All @@ -147,7 +152,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Subgraph& active_branch_subgraph =
*(*subgraphs)[active_branch_subgraph_index];
for (int i = 0; i < active_branch_subgraph.inputs().size(); ++i) {
const TfLiteTensor* input = GetInput(context, node, i + 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i + 1, &input));
TfLiteTensor* subgraph_input =
active_branch_subgraph.tensor(active_branch_subgraph.inputs()[i]);
TF_LITE_ENSURE_EQ(context, input->bytes, subgraph_input->bytes);
Expand All @@ -164,7 +170,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {

bool has_dynamic_output_tensors = false;
for (int i = 0; i < node->outputs->size; ++i) {
TfLiteTensor* output = GetOutput(context, node, i);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
if (IsDynamicTensor(output)) {
has_dynamic_output_tensors = true;
break;
Expand All @@ -173,7 +180,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {

if (has_dynamic_output_tensors) {
for (int i = 0; i < node->outputs->size; ++i) {
TfLiteTensor* output = GetOutput(context, node, i);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
TfLiteTensor* subgraph_output =
active_branch_subgraph.tensor(active_branch_subgraph.outputs()[i]);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(subgraph_output->dims);
Expand All @@ -185,7 +193,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
for (int i = 0; i < active_branch_subgraph.outputs().size(); ++i) {
const TfLiteTensor* subgraph_output =
active_branch_subgraph.tensor(active_branch_subgraph.outputs()[i]);
TfLiteTensor* output = GetOutput(context, node, i);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
TF_LITE_ENSURE_EQ(context, output->bytes, subgraph_output->bytes);
memcpy(output->data.raw, subgraph_output->data.raw, output->bytes);
}
Expand Down
14 changes: 10 additions & 4 deletions tensorflow/lite/kernels/l2norm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TF_LITE_ENSURE(context, NumDimensions(input) <= 4);

Expand Down Expand Up @@ -74,8 +77,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {

template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// TODO(b/143912164): instead of hardcode the epsilon here, we should read it
// from tensorflow, i.e., adding a params.
Expand Down
14 changes: 10 additions & 4 deletions tensorflow/lite/kernels/local_response_norm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);

Expand All @@ -61,8 +64,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLocalResponseNormParams*>(node->builtin_data);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (output->type == kTfLiteFloat32) {
#define TF_LITE_LOCAL_RESPONSE_NORM(type) \
Expand Down
24 changes: 18 additions & 6 deletions tensorflow/lite/kernels/logical.cc
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// Reinterprete the opaque data provided by user.
OpData* data = reinterpret_cast<OpData*>(node->user_data);

const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);

Expand Down Expand Up @@ -84,9 +90,15 @@ TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node,
bool (*func)(bool, bool)) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);

const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<bool, bool, bool>(
Expand Down
22 changes: 15 additions & 7 deletions tensorflow/lite/kernels/lsh_projection.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,22 +73,26 @@ TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* hash = GetInput(context, node, 0);
const TfLiteTensor* hash;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));
TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2);
// Support up to 32 bits.
TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32);

const TfLiteTensor* input = GetInput(context, node, 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);

if (NumInputs(node) == 3) {
const TfLiteTensor* weight = GetInput(context, node, 2);
const TfLiteTensor* weight;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &weight));
TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0),
SizeOfDimension(input, 0));
}

TfLiteTensor* output = GetOutput(context, node, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
switch (params->type) {
case kTfLiteLshProjectionSparse:
Expand Down Expand Up @@ -170,9 +174,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);

int32_t* out_buf = GetOutput(context, node, 0)->data.i32;
const TfLiteTensor* hash = GetInput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 1);
TfLiteTensor* out_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));
int32_t* out_buf = out_tensor->data.i32;
const TfLiteTensor* hash;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));
const TfLiteTensor* weight =
NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2);

Expand Down
479 changes: 326 additions & 153 deletions tensorflow/lite/kernels/lstm.cc

Large diffs are not rendered by default.

14 changes: 10 additions & 4 deletions tensorflow/lite/kernels/matrix_diag.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,15 @@ constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 1);

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
// Resize the output tensor.
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1);
for (int i = 0; i < input_dims_size; i++) {
Expand Down Expand Up @@ -116,8 +119,11 @@ void FillDiagHelper(const TfLiteTensor* input, TfLiteTensor* output) {
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
FillDiagHelper(input, output);
return kTfLiteOk;
}
Expand Down
18 changes: 13 additions & 5 deletions tensorflow/lite/kernels/matrix_set_diag.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,15 @@ constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 2);

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size);
for (int i = 0; i < input_dims_size; i++) {
Expand Down Expand Up @@ -126,9 +129,14 @@ void FillDiagHelper(const TfLiteTensor* input, const TfLiteTensor* diag,
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* diag;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kDiagonalTensor, &diag));
FillDiagHelper(input, diag, output);
return kTfLiteOk;
}
Expand Down
24 changes: 18 additions & 6 deletions tensorflow/lite/kernels/mfcc.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,9 +73,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input_wav;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorWav, &input_wav));
const TfLiteTensor* input_rate;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorRate, &input_rate));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TF_LITE_ENSURE_EQ(context, NumDimensions(input_wav), 3);
TF_LITE_ENSURE_EQ(context, NumElements(input_rate), 1);
Expand All @@ -101,9 +107,15 @@ template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);

const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input_wav;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorWav, &input_wav));
const TfLiteTensor* input_rate;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorRate, &input_rate));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

const int32 sample_rate = *GetTensorData<int>(input_rate);

Expand Down
18 changes: 12 additions & 6 deletions tensorflow/lite/kernels/mirror_pad.cc
Original file line number Diff line number Diff line change
Expand Up @@ -162,8 +162,10 @@ struct MirrorPadWorkerTask : cpu_backend_threadpool::Task {

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
ruy::profiler::ScopeLabel label("MirrorPad");
const TfLiteTensor* input_tensor = GetInput(context, node, 0);
const TfLiteTensor* padding_matrix = GetInput(context, node, 1);
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
const TfLiteTensor* padding_matrix;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
auto* params =
reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data);

Expand All @@ -172,7 +174,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
}
const int input_dims = NumDimensions(input_tensor);

TfLiteTensor* output_tensor = GetOutput(context, node, 0);
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
if (IsDynamicTensor(output_tensor)) {
auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
if (output_size == nullptr) {
Expand Down Expand Up @@ -258,9 +261,12 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
void Free(TfLiteContext* context, void* buffer) {}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_tensor = GetInput(context, node, 0);
const TfLiteTensor* padding_matrix = GetInput(context, node, 1);
TfLiteTensor* output_tensor = GetOutput(context, node, 0);
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
const TfLiteTensor* padding_matrix;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &padding_matrix));
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));

TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
Expand Down
24 changes: 18 additions & 6 deletions tensorflow/lite/kernels/mul.cc
Original file line number Diff line number Diff line change
Expand Up @@ -75,9 +75,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);

Expand Down Expand Up @@ -259,9 +265,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);

const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
EvalMul<kernel_type>(context, node, params, data, input1, input2, output);
Expand Down
14 changes: 10 additions & 4 deletions tensorflow/lite/kernels/neg.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,17 +34,23 @@ constexpr int kOutputTensor = 0;
// Validates that NEG has exactly one input and one output, propagates the
// input's type to the output, and resizes the output to the input's shape.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  // GetInputSafe/GetOutputSafe report an error status instead of returning a
  // null tensor on an out-of-range index, so a malformed model cannot cause a
  // null dereference below. (The stripped diff had left the superseded
  // GetInput/GetOutput declarations in place, redeclaring input/output.)
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  output->type = input->type;
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input->type) {
case kTfLiteInt64:
reference_ops::Negate(
Expand Down
123 changes: 81 additions & 42 deletions tensorflow/lite/kernels/non_max_suppression.cc
Original file line number Diff line number Diff line change
Expand Up @@ -79,20 +79,25 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

// Boxes & Scores.
const TfLiteTensor* input_boxes = GetInput(context, node, kInputTensorBoxes);
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
TF_LITE_ENSURE_EQ(context, input_boxes->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_boxes), 2);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input_boxes, 1), 4);
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores =
GetInput(context, node, kInputTensorScores);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
TF_LITE_ENSURE_EQ(context, input_scores->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_scores), 1);
TF_LITE_ENSURE_EQ(context, num_boxes, SizeOfDimension(input_scores, 0));

// Max output size.
const TfLiteTensor* input_max_output_size =
GetInput(context, node, kInputTensorMaxOutputSize);
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
TF_LITE_ENSURE_EQ(context, input_max_output_size->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_max_output_size), 0);
const bool is_max_output_size_const = IsConstantTensor(input_max_output_size);
Expand All @@ -103,30 +108,43 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

// IoU & Score thresholds.
const TfLiteTensor* input_iou_threshold =
GetInput(context, node, kInputTensorIouThreshold);
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
TF_LITE_ENSURE_EQ(context, input_iou_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_iou_threshold), 0);
const TfLiteTensor* input_score_threshold =
GetInput(context, node, kInputTensorScoreThreshold);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
TF_LITE_ENSURE_EQ(context, input_iou_threshold->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_score_threshold), 0);

if (is_soft_nms) {
const TfLiteTensor* input_sigma =
GetInput(context, node, kInputTensorSigma);
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
TF_LITE_ENSURE_EQ(context, input_sigma->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, NumDimensions(input_sigma), 0);

TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3);
TfLiteTensor* output_selected_indices =
GetOutput(context, node, kSoftNMSOutputTensorSelectedIndices);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_selected_scores =
GetOutput(context, node, kSoftNMSOutputTensorSelectedScores);
TfLiteTensor* output_selected_scores;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
output_selected_scores->type = kTfLiteFloat32;
TfLiteTensor* output_num_selected_indices =
GetOutput(context, node, kSoftNMSOutputTensorNumSelectedIndices);
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});

Expand All @@ -139,11 +157,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
} else {
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor* output_selected_indices =
GetOutput(context, node, kNMSOutputTensorSelectedIndices);
TfLiteTensor* output_selected_indices;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
output_selected_indices->type = kTfLiteInt32;
TfLiteTensor* output_num_selected_indices =
GetOutput(context, node, kNMSOutputTensorNumSelectedIndices);
TfLiteTensor* output_num_selected_indices;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
output_num_selected_indices->type = kTfLiteInt32;
SetTensorSizes(context, output_num_selected_indices, {});

Expand Down Expand Up @@ -179,42 +201,57 @@ void ResetUnusedElementsToZeroes(const int max_output_size,
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const bool is_soft_nms = NumInputs(node) == 6;

const TfLiteTensor* input_boxes = GetInput(context, node, kInputTensorBoxes);
const TfLiteTensor* input_boxes;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorBoxes, &input_boxes));
const int num_boxes = SizeOfDimension(input_boxes, 0);
const TfLiteTensor* input_scores =
GetInput(context, node, kInputTensorScores);
const TfLiteTensor* input_max_output_size =
GetInput(context, node, kInputTensorMaxOutputSize);
const TfLiteTensor* input_scores;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorScores, &input_scores));
const TfLiteTensor* input_max_output_size;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorMaxOutputSize,
&input_max_output_size));
const int max_output_size_value = *GetTensorData<int>(input_max_output_size);
TF_LITE_ENSURE(context, (max_output_size_value >= 0));
const bool is_max_output_size_const = IsConstantTensor(input_max_output_size);
const TfLiteTensor* input_iou_threshold =
GetInput(context, node, kInputTensorIouThreshold);
const TfLiteTensor* input_iou_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorIouThreshold,
&input_iou_threshold));
const float iou_threshold = *GetTensorData<float>(input_iou_threshold);
const TfLiteTensor* input_score_threshold =
GetInput(context, node, kInputTensorScoreThreshold);
const TfLiteTensor* input_score_threshold;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorScoreThreshold,
&input_score_threshold));
const float score_threshold = *GetTensorData<float>(input_score_threshold);

TfLiteTensor* output_selected_indices = nullptr;
TfLiteTensor* output_selected_scores = nullptr;
TfLiteTensor* output_num_selected_indices = nullptr;

if (is_soft_nms) {
const TfLiteTensor* input_sigma =
GetInput(context, node, kInputTensorSigma);
const TfLiteTensor* input_sigma;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kInputTensorSigma, &input_sigma));
const float soft_nms_sigma = *GetTensorData<float>(input_sigma);
if (soft_nms_sigma < 0) {
context->ReportError(context, "Invalid sigma value for soft NMS: %f",
soft_nms_sigma);
return kTfLiteError;
}

output_selected_indices =
GetOutput(context, node, kSoftNMSOutputTensorSelectedIndices);
output_selected_scores =
GetOutput(context, node, kSoftNMSOutputTensorSelectedScores);
output_num_selected_indices =
GetOutput(context, node, kSoftNMSOutputTensorNumSelectedIndices);
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kSoftNMSOutputTensorSelectedScores,
&output_selected_scores));
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, kSoftNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
SetTensorSizes(context, output_selected_scores, {max_output_size_value});
Expand All @@ -228,10 +265,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
max_output_size_value, *output_num_selected_indices->data.i32,
output_selected_indices->data.i32, output_selected_scores->data.f);
} else {
output_selected_indices =
GetOutput(context, node, kNMSOutputTensorSelectedIndices);
output_num_selected_indices =
GetOutput(context, node, kNMSOutputTensorNumSelectedIndices);
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kNMSOutputTensorSelectedIndices,
&output_selected_indices));
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
kNMSOutputTensorNumSelectedIndices,
&output_num_selected_indices));
if (!is_max_output_size_const) {
SetTensorSizes(context, output_selected_indices, {max_output_size_value});
}
Expand Down
8 changes: 6 additions & 2 deletions tensorflow/lite/kernels/numeric_verify.cc
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
node->temporaries = TfLiteIntArrayCreate(1);
node->temporaries->data[0] = op_data->cache_tensor_id;

TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* dequantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/0, &dequantized));
dequantized->type = op_context.ref->type;
dequantized->allocation_type = kTfLiteDynamic;

Expand Down Expand Up @@ -142,7 +144,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
}

// Dequantize the input
TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* dequantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/0, &dequantized));
auto status = builtin::dequantize::DequantizeImpl<kernel_type>(
context, node, op_context.input, dequantized);
if (status != kTfLiteOk) {
Expand Down
17 changes: 12 additions & 5 deletions tensorflow/lite/kernels/pack.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input0 = GetInput(context, node, 0);
const TfLiteTensor* input0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input0));
const int dimension_size = NumDimensions(input0) + 1;
if (data->axis < 0) {
data->axis += dimension_size;
Expand All @@ -55,7 +56,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
// Make sure all inputs have the same shape and type.
for (int i = 1; i < data->values_count; ++i) {
const TfLiteTensor* input = GetInput(context, node, i);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE(context, HaveSameShapes(input0, input));
TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type);
}
Expand All @@ -72,13 +74,16 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
}

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input0->type);

// Guarantee input/output quantization params match as we do not support
// packing quantized tensors.
for (int i = 0; i < data->values_count; i++) {
const TfLiteTensor* input = GetInput(context, node, i);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input));
TF_LITE_ENSURE_EQ(context, input->params.zero_point,
output->params.zero_point);
TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale);
Expand Down Expand Up @@ -106,7 +111,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLitePackParams* data =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (output->type) {
case kTfLiteFloat32: {
return PackImpl<float>(context, node, output, data->values_count,
Expand Down
24 changes: 16 additions & 8 deletions tensorflow/lite/kernels/pooling.cc
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,10 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {

TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

Expand Down Expand Up @@ -368,8 +370,10 @@ TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);

TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
AverageEvalFloat<kernel_type>(context, node, params, data, input, output);
Expand Down Expand Up @@ -399,8 +403,10 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);

TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
Expand Down Expand Up @@ -430,8 +436,10 @@ TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);

TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
L2EvalFloat<kernel_type>(context, node, params, data, input, output);
Expand Down
24 changes: 18 additions & 6 deletions tensorflow/lite/kernels/pow.cc
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {

OpData* data = reinterpret_cast<OpData*>(node->user_data);

const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);

Expand Down Expand Up @@ -112,9 +118,15 @@ TfLiteStatus CheckValue(TfLiteContext* context, const TfLiteTensor* input) {
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);

const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (output->type) {
case kTfLiteInt32: {
Expand Down
12 changes: 8 additions & 4 deletions tensorflow/lite/kernels/quantize.cc
Original file line number Diff line number Diff line change
Expand Up @@ -97,8 +97,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));

// TODO(b/128934713): Add support for fixed-point per-channel quantization.
// Currently this only support affine per-layer quantization.
Expand Down Expand Up @@ -141,8 +143,10 @@ template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = static_cast<OpData*>(node->user_data);

const TfLiteTensor* input = GetInput(context, node, 0);
TfLiteTensor* output = GetOutput(context, node, 0);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));

const RuntimeShape input_shape = GetTensorShape(input);
const RuntimeShape output_shape = GetTensorShape(output);
Expand Down
28 changes: 19 additions & 9 deletions tensorflow/lite/kernels/range.cc
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* start = GetInput(context, node, kStartTensor);
const TfLiteTensor* limit = GetInput(context, node, kLimitTensor);
const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor);
const TfLiteTensor* start;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartTensor, &start));
const TfLiteTensor* limit;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kLimitTensor, &limit));
const TfLiteTensor* delta;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDeltaTensor, &delta));
// Make sure all the inputs are scalars.
TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0);
Expand All @@ -103,7 +106,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype);
TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype);

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = dtype;

if (IsConstantTensor(start) && IsConstantTensor(limit) &&
Expand All @@ -130,11 +135,16 @@ void EvalImpl(const TfLiteTensor* start, const TfLiteTensor* delta,
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* start = GetInput(context, node, kStartTensor);
const TfLiteTensor* limit = GetInput(context, node, kLimitTensor);
const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor);

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* start;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartTensor, &start));
const TfLiteTensor* limit;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kLimitTensor, &limit));
const TfLiteTensor* delta;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDeltaTensor, &delta));

TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
Expand Down
7 changes: 5 additions & 2 deletions tensorflow/lite/kernels/rank.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteInt32;

// By design, the input shape is always known at the time of Prepare, even
Expand Down
18 changes: 12 additions & 6 deletions tensorflow/lite/kernels/read_variable.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);

const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
const TfLiteTensor* input_resource_id_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId,
&input_resource_id_tensor));
TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);

TfLiteTensor* output = GetOutput(context, node, kOutputValue);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputValue, &output));
SetTensorToDynamic(output);

return kTfLiteOk;
Expand All @@ -48,15 +51,18 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);

const TfLiteTensor* input_resource_id_tensor =
GetInput(context, node, kInputVariableId);
const TfLiteTensor* input_resource_id_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId,
&input_resource_id_tensor));
int resource_id = input_resource_id_tensor->data.i32[0];
auto& resources = subgraph->resources();
auto* variable = resource::GetResourceVariable(&resources, resource_id);
TF_LITE_ENSURE(context, variable != nullptr);

TfLiteTensor* variable_tensor = variable->GetTensor();
TfLiteTensor* output = GetOutput(context, node, kOutputValue);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputValue, &output));

TF_LITE_ENSURE_TYPES_EQ(context, variable_tensor->type, output->type);
TF_LITE_ENSURE_OK(
Expand Down
55 changes: 41 additions & 14 deletions tensorflow/lite/kernels/reduce.cc
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,9 @@ TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(3);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/0, &scratch_tensor));
scratch_tensor->type = kTfLiteInt32;
scratch_tensor->allocation_type = kTfLiteArenaRw;
TfLiteIntArray* index_size = TfLiteIntArrayCreate(1);
Expand All @@ -180,11 +182,15 @@ TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,

// Creates a temp tensor to store resolved axis given input data.
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
TfLiteTensor* resolved_axis;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/1, &resolved_axis));
resolved_axis->type = kTfLiteInt32;
// Creates a temp tensor to store temp sums when calculating mean.
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
TfLiteTensor* temp_sum;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/2, &temp_sum));
switch (op_context->input->type) {
case kTfLiteFloat32:
temp_sum->type = kTfLiteFloat32;
Expand Down Expand Up @@ -217,7 +223,9 @@ TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_TYPES_EQ(context, op_context.axis->type, kTfLiteInt32);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));

TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
TfLiteTensor* resolved_axis;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/1, &resolved_axis));
// Leaves work to Eval if axis is not constant; else resizes output.
if (!IsConstantTensor(op_context.axis)) {
SetTensorToDynamic(op_context.output);
Expand All @@ -233,7 +241,8 @@ TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {

// REDUCE_ANY is defined only over booleans: verify the input's type, then
// delegate the shared axis/output validation to PrepareSimple().
TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  // GetInputSafe fails gracefully on a missing tensor instead of returning
  // nullptr. (The stripped diff had left the old GetInput line alongside this
  // replacement, redeclaring `input`.)
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool);
  return PrepareSimple(context, node);
}
Expand All @@ -254,7 +263,9 @@ TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
QuantizeMultiplier(real_multiplier, &data->multiplier, &exponent);
data->shift = exponent;
}
TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
TfLiteTensor* temp_sum;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/2, &temp_sum));
if (!IsConstantTensor(op_context.axis)) {
SetTensorToDynamic(temp_sum);
return kTfLiteOk;
Expand Down Expand Up @@ -343,9 +354,15 @@ TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);

int num_axis = static_cast<int>(NumElements(op_context.axis));
TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
TfLiteTensor* temp_index;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/0, &temp_index));
TfLiteTensor* resolved_axis;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/1, &resolved_axis));
TfLiteTensor* temp_sum;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/2, &temp_sum));
// Resize the output tensor if the output tensor is dynamic.
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context,
Expand Down Expand Up @@ -490,8 +507,12 @@ TfLiteStatus EvalLogic(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context, T init_value,
T reducer(const T current, const T in)) {
int64_t num_axis = NumElements(op_context->axis);
TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
TfLiteTensor* temp_index;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/0, &temp_index));
TfLiteTensor* resolved_axis;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/1, &resolved_axis));
// Resize the output tensor if the output tensor is dynamic.
if (IsDynamicTensor(op_context->output)) {
TF_LITE_ENSURE_OK(context,
Expand Down Expand Up @@ -621,9 +642,15 @@ TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) {
if (need_rescale) {
// Rescaling 8bit reduce sum.
int num_axis = static_cast<int>(NumElements(op_context.axis));
TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
TfLiteTensor* temp_index;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/0, &temp_index));
TfLiteTensor* resolved_axis;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, /*index=*/1, &resolved_axis));
TfLiteTensor* temp_sum;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, /*index=*/2, &temp_sum));
// Resize the output tensor if the output tensor is dynamic.
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context,
Expand Down
22 changes: 16 additions & 6 deletions tensorflow/lite/kernels/reshape.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,11 @@ TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
scoped_output_shape(output_shape, TfLiteIntArrayFree);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// Tensorflow's Reshape allows one of the shape components to have the
// special -1 value, meaning it will be calculated automatically based on the
Expand Down Expand Up @@ -70,6 +73,7 @@ TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
inline TfLiteIntArray* GetOutputShapeFromTensor(TfLiteContext* context,
TfLiteNode* node) {
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
if (shape == nullptr) return nullptr;

TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape->dims->data[0]);
for (int i = 0; i < output_shape->size; ++i) {
Expand Down Expand Up @@ -103,7 +107,8 @@ inline TfLiteIntArray* GetOutputShapeFromParam(TfLiteContext* context,
// Check if the shape tensor is valid. Shapes should be int32 vectors.
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
return (shape->dims->size == 1 && shape->type == kTfLiteInt32);
return (shape != nullptr && shape->dims->size == 1 &&
shape->type == kTfLiteInt32);
}

TfLiteIntArray* GetOutputShape(TfLiteContext* context, TfLiteNode* node) {
Expand All @@ -122,7 +127,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
// calculate their shapes now. String tensors don't benefit from having their
// shapes precalculated because the actual memory can only be allocated after
// we know all the content.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type != kTfLiteString) {
if (NumInputs(node) == 1 ||
IsConstantTensor(GetInput(context, node, kShapeTensor))) {
Expand All @@ -135,8 +142,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// There are two ways in which the 'output' can be made dynamic: it could be
// a string tensor, or its shape cannot be calculated during Prepare(). In
Expand Down
20 changes: 14 additions & 6 deletions tensorflow/lite/kernels/resize_bilinear.cc
Original file line number Diff line number Diff line change
Expand Up @@ -61,9 +61,13 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// TODO(ahentz): Our current implementations rely on the inputs being 4D.
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
Expand Down Expand Up @@ -96,9 +100,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));

if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
Expand Down
20 changes: 14 additions & 6 deletions tensorflow/lite/kernels/resize_nearest_neighbor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,13 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// TODO(ahentz): Our current implementations rely on the input being 4D,
// and the size being 1D tensor with exactly 2 elements.
Expand All @@ -85,9 +89,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteResizeNearestNeighborParams*>(node->builtin_data);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));

if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
Expand Down
21 changes: 15 additions & 6 deletions tensorflow/lite/kernels/reverse.cc
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* axis = GetInput(context, node, kAxisTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxisTensor, &axis));
TF_LITE_ENSURE_EQ(context, NumDimensions(axis), 1);
TF_LITE_ENSURE(context, NumDimensions(input) >= NumElements(axis));

Expand All @@ -59,24 +61,31 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
context->ReportError(context, "Current does not support more than 1 axis.");
}

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);

return context->ResizeTensor(context, output, output_shape);
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis_tensor;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kAxisTensor, &axis_tensor));
int axis = GetTensorData<int32_t>(axis_tensor)[0];
const int rank = NumDimensions(input);
if (axis < 0) {
axis += rank;
}

TF_LITE_ENSURE(context, axis >= 0 && axis < rank);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (output->type) {
case kTfLiteFloat32: {
Expand Down
32 changes: 22 additions & 10 deletions tensorflow/lite/kernels/reverse_sequence.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* seq_lengths = GetInput(context, node, kSeqLengthsTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* seq_lengths;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kSeqLengthsTensor, &seq_lengths));
TF_LITE_ENSURE_EQ(context, NumDimensions(seq_lengths), 1);

if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
Expand All @@ -56,7 +59,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteError;
}

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);

Expand All @@ -65,9 +70,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {

template <typename T, typename TS>
TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* seq_lengths_tensor =
GetInput(context, node, kSeqLengthsTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* seq_lengths_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSeqLengthsTensor,
&seq_lengths_tensor));
const TS* seq_lengths = GetTensorData<TS>(seq_lengths_tensor);

auto* params =
Expand All @@ -86,7 +93,9 @@ TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, seq_lengths[i] <= SizeOfDimension(input, seq_dim));
}

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

reference_ops::ReverseSequence<T, TS>(
seq_lengths, seq_dim, batch_dim, GetTensorShape(input),
Expand All @@ -98,8 +107,9 @@ TfLiteStatus ReverseSequenceImpl(TfLiteContext* context, TfLiteNode* node) {

template <typename T>
TfLiteStatus ReverseSequenceHelper(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* seq_lengths_tensor =
GetInput(context, node, kSeqLengthsTensor);
const TfLiteTensor* seq_lengths_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSeqLengthsTensor,
&seq_lengths_tensor));
switch (seq_lengths_tensor->type) {
case kTfLiteInt32: {
return ReverseSequenceImpl<T, int32_t>(context, node);
Expand All @@ -119,7 +129,9 @@ TfLiteStatus ReverseSequenceHelper(TfLiteContext* context, TfLiteNode* node) {
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (output->type) {
case kTfLiteFloat32: {
Expand Down
92 changes: 64 additions & 28 deletions tensorflow/lite/kernels/rfft2d.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,16 +73,20 @@ static TfLiteStatus InitTemporaryTensors(TfLiteContext* context,
data->fft_double_working_area_id = first_new_index + 1;

// Set up FFT integer working area buffer.
TfLiteTensor* fft_integer_working_area =
GetTemporary(context, node, kFftIntegerWorkingAreaTensor);
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
fft_integer_working_area->type = kTfLiteInt32;
// If fft_length is not a constant tensor, fft_integer_working_area will be
// set to dynamic later in Prepare.
fft_integer_working_area->allocation_type = kTfLiteArenaRw;

// Set up FFT double working area buffer.
TfLiteTensor* fft_double_working_area =
GetTemporary(context, node, kFftDoubleWorkingAreaTensor);
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
// fft_double_working_area is a double tensor. Ideally, double should be
// added into tflite data types. However, since fft_double_working_area is a
// temporary tensor, and there are no ops having double input/output tensors
Expand All @@ -100,10 +104,13 @@ static TfLiteStatus InitTemporaryTensors(TfLiteContext* context,

TfLiteStatus ResizeOutputandTemporaryTensors(TfLiteContext* context,
TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const int num_dims = NumDimensions(input);
TF_LITE_ENSURE(context, num_dims >= 2);
const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
// The lib, fft2d, can only handle fft_lengths of power of 2.
TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[0]));
Expand All @@ -116,24 +123,30 @@ TfLiteStatus ResizeOutputandTemporaryTensors(TfLiteContext* context,
int half_fft_working_length = fft_working_length / 2;

// Resize output tensor.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
output_shape->data[num_dims - 2] = fft_length_data[0];
output_shape->data[num_dims - 1] = fft_length_data[1] / 2 + 1;
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));

// Resize temporary tensors, fft_integer_working_area.
TfLiteTensor* fft_integer_working_area =
GetTemporary(context, node, kFftIntegerWorkingAreaTensor);
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteIntArray* fft_integer_working_area_shape = TfLiteIntArrayCreate(1);
fft_integer_working_area_shape->data[0] =
2 + static_cast<int>(sqrt(fft_working_length));
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_integer_working_area,
fft_integer_working_area_shape));

// Resize temporary tensors, fft_double_working_area.
TfLiteTensor* fft_double_working_area =
GetTemporary(context, node, kFftDoubleWorkingAreaTensor);
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
TfLiteIntArray* fft_double_working_area_shape = TfLiteIntArrayCreate(1);
fft_double_working_area_shape->data[0] =
half_fft_working_length + fft_width / 4;
Expand All @@ -157,7 +170,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

// Check type and shape of the input tensor
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 2);
if (input->type != kTfLiteFloat32) {
context->ReportError(context,
Expand All @@ -167,7 +181,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

// Check type and shape of the fft_length tensor
const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const RuntimeShape fft_length_shape = GetTensorShape(fft_length);

TF_LITE_ENSURE_EQ(context, NumDimensions(fft_length), 1);
Expand All @@ -183,17 +199,23 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_STATUS(InitTemporaryTensors(context, node));

// Set output type
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteComplex64;

// Exit early if fft_length is a non-const tensor. Set output tensor and
// temporary tensors to dynamic, so that their tensor sizes can be determined
// in Eval.
if (!IsConstantTensor(fft_length)) {
TfLiteTensor* fft_integer_working_area =
GetTemporary(context, node, kFftIntegerWorkingAreaTensor);
TfLiteTensor* fft_double_working_area =
GetTemporary(context, node, kFftDoubleWorkingAreaTensor);
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
SetTensorToDynamic(fft_integer_working_area);
SetTensorToDynamic(fft_double_working_area);
SetTensorToDynamic(output);
Expand Down Expand Up @@ -325,11 +347,16 @@ void PrepareOutputBuffer(complex<float>* output_data, int fft_height,
}

TfLiteStatus Rfft2dHelper(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const float* input_data = GetTensorData<float>(input);
const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
complex<float>* output_data = GetTensorData<complex<float>>(output);

int fft_height, fft_width;
Expand Down Expand Up @@ -358,14 +385,18 @@ TfLiteStatus Rfft2dHelper(TfLiteContext* context, TfLiteNode* node) {
}

// Get buffer for integer working area.
TfLiteTensor* fft_integer_working_area =
GetTemporary(context, node, kFftIntegerWorkingAreaTensor);
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
int* fft_integer_working_area_data =
GetTensorData<int>(fft_integer_working_area);

// Get buffer for double working area.
TfLiteTensor* fft_double_working_area =
GetTemporary(context, node, kFftDoubleWorkingAreaTensor);
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
// Get double value out of the memory of fft_double_working_area_data.
double* fft_double_working_area_data = reinterpret_cast<double*>(
GetTensorData<int64_t>(fft_double_working_area));
Expand Down Expand Up @@ -393,10 +424,15 @@ TfLiteStatus Rfft2dHelper(TfLiteContext* context, TfLiteNode* node) {
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (output->type != kTfLiteComplex64) {
context->ReportError(context,
Expand Down
14 changes: 10 additions & 4 deletions tensorflow/lite/kernels/round.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,11 @@ constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
Expand All @@ -41,8 +44,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

optimized_ops::Round(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output), GetTensorData<float>(output));
Expand Down
26 changes: 18 additions & 8 deletions tensorflow/lite/kernels/scatter_nd.cc
Original file line number Diff line number Diff line change
Expand Up @@ -74,9 +74,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* indices = GetInput(context, node, kIndices);
const TfLiteTensor* updates = GetInput(context, node, kUpdates);
const TfLiteTensor* shape = GetInput(context, node, kShape);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));

switch (updates->type) {
case kTfLiteFloat32:
Expand All @@ -96,7 +99,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteError;
}

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = updates->type;

if (IsConstantTensor(shape)) {
Expand Down Expand Up @@ -163,10 +168,15 @@ TfLiteStatus EvalScatterNd(TfLiteContext* context, const TfLiteTensor* indices,
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices = GetInput(context, node, kIndices);
const TfLiteTensor* updates = GetInput(context, node, kUpdates);
const TfLiteTensor* shape = GetInput(context, node, kShape);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

switch (indices->type) {
case kTfLiteInt32:
Expand Down
27 changes: 18 additions & 9 deletions tensorflow/lite/kernels/segment_sum.cc
Original file line number Diff line number Diff line change
Expand Up @@ -64,11 +64,15 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* data = GetInput(context, node, kInputDataTensor);
const TfLiteTensor* segment_ids =
GetInput(context, node, kInputSegmentIdsTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
data->type == kTfLiteInt32 || data->type == kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, segment_ids->type, kTfLiteInt32);
Expand All @@ -82,10 +86,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* data = GetInput(context, node, kInputDataTensor);
const TfLiteTensor* segment_ids =
GetInput(context, node, kInputSegmentIdsTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* data;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputDataTensor, &data));
const TfLiteTensor* segment_ids;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputSegmentIdsTensor,
&segment_ids));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
Expand Down
34 changes: 24 additions & 10 deletions tensorflow/lite/kernels/select.cc
Original file line number Diff line number Diff line change
Expand Up @@ -61,11 +61,18 @@ TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input_condition =
GetInput(context, node, kInputTensorCondition);
const TfLiteTensor* input_x = GetInput(context, node, kInputTensorX);
const TfLiteTensor* input_y = GetInput(context, node, kInputTensorY);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input_condition;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorCondition,
&input_condition));
const TfLiteTensor* input_x;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorX, &input_x));
const TfLiteTensor* input_y;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorY, &input_y));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// Input must be bool.
TF_LITE_ENSURE_TYPES_EQ(context, input_condition->type, kTfLiteBool);
Expand Down Expand Up @@ -111,11 +118,18 @@ TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) {

TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input_condition =
GetInput(context, node, kInputTensorCondition);
const TfLiteTensor* input_x = GetInput(context, node, kInputTensorX);
const TfLiteTensor* input_y = GetInput(context, node, kInputTensorY);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input_condition;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensorCondition,
&input_condition));
const TfLiteTensor* input_x;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorX, &input_x));
const TfLiteTensor* input_y;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorY, &input_y));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

#define TF_LITE_SELECT(type, op) \
reference_ops::op(GetTensorShape(input_condition), \
Expand Down
7 changes: 5 additions & 2 deletions tensorflow/lite/kernels/shape.cc
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

auto* params = reinterpret_cast<TfLiteShapeParams*>(node->builtin_data);
switch (params->out_type) {
Expand Down
15 changes: 10 additions & 5 deletions tensorflow/lite/kernels/skip_gram.cc
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ limitations under the License.

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"

Expand All @@ -48,10 +49,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

TF_LITE_ENSURE_TYPES_EQ(context, GetInput(context, node, 0)->type,
kTfLiteString);
TF_LITE_ENSURE_TYPES_EQ(context, GetOutput(context, node, 0)->type,
kTfLiteString);
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, input_tensor->type, kTfLiteString);
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, output_tensor->type, kTfLiteString);
return kTfLiteOk;
}

Expand Down Expand Up @@ -91,7 +94,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {

// Split sentence to words.
std::vector<StringRef> words;
tflite::StringRef strref = tflite::GetString(GetInput(context, node, 0), 0);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
tflite::StringRef strref = tflite::GetString(input, 0);
int prev_idx = 0;
for (int i = 1; i < strref.len; i++) {
if (isspace(*(strref.str + i))) {
Expand Down
26 changes: 18 additions & 8 deletions tensorflow/lite/kernels/slice.cc
Original file line number Diff line number Diff line change
Expand Up @@ -113,10 +113,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* begin;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBeginTensor, &begin));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

// Ensure validity of input tensor and its dimension.
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
Expand All @@ -142,10 +147,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {

template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
const TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* begin;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBeginTensor, &begin));
const TfLiteTensor* size;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
Expand Down
14 changes: 10 additions & 4 deletions tensorflow/lite/kernels/space_to_depth.cc
Original file line number Diff line number Diff line change
Expand Up @@ -45,8 +45,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);

Expand Down Expand Up @@ -80,8 +83,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);

const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));

#define TF_LITE_SPACE_TO_DEPTH(type, scalar) \
tflite::SpaceToDepthParams op_params; \
Expand Down
Loading