diff --git a/tensorflow/lite/micro/micro_interpreter_graph.cc b/tensorflow/lite/micro/micro_interpreter_graph.cc index 8426a84be57..3d6c58c57a7 100644 --- a/tensorflow/lite/micro/micro_interpreter_graph.cc +++ b/tensorflow/lite/micro/micro_interpreter_graph.cc @@ -15,6 +15,8 @@ limitations under the License. #include "tensorflow/lite/micro/micro_interpreter_graph.h" +#include <algorithm> + #include "flatbuffers/flatbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" @@ -42,6 +44,34 @@ const char* OpNameFromRegistration(const TFLMRegistration* registration) { } } +// Check tensor shapes to determine if there are dynamic tensors present. +// Returns the index of the first dynamic tensor found, otherwise returns -1. +int CheckDynamicTensors(const TfLiteIntArray* const tensor_indices, + const TfLiteEvalTensor* const eval_tensors) { + // some operators have no tensors, so node->inputs and/or node->outputs + // can be nullptr. This occurs in the MicroInterpreter unit tests. + if (tensor_indices == nullptr) { + return -1; + } + + for (int i = 0; i < tensor_indices->size; i++) { + const int tensor_index = tensor_indices->data[i]; + // Skip optional tensors + if (tensor_index < 0) { + continue; + } + // Check shape for dims <= 0. + // This code handles legacy scalar tensors (dims->size == 0). 
+ const TfLiteEvalTensor* const tp = eval_tensors + tensor_index; + if (!std::all_of(tp->dims->data, tp->dims->data + tp->dims->size, + [](int dim) { return dim > 0; })) { + return tensor_index; + } + } + + return -1; +} + } // namespace MicroInterpreterGraph::MicroInterpreterGraph( @@ -117,7 +147,7 @@ TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() { if (registration->prepare != nullptr) { TfLiteStatus prepare_status = registration->prepare(context_, node); if (prepare_status != kTfLiteOk) { - MicroPrintf("Node %s (number %df) failed to prepare with status %d", + MicroPrintf("Node %s (number %u) failed to prepare with status %d", OpNameFromRegistration(registration), current_operator_index_, prepare_status); return kTfLiteError; @@ -126,6 +156,18 @@ TfLiteStatus MicroInterpreterGraph::PrepareSubgraphs() { GetMicroContext(context_)->ResetDecompressionMemoryAllocations(); #endif // USE_TFLM_COMPRESSION } + + const int dynamic_tensor_index = CheckDynamicTensors( + node->outputs, subgraph_allocations_[subgraph_idx].tensors); + if (dynamic_tensor_index != -1) { + MicroPrintf( + "Op#%u (%s) of subgraph %u has dynamic tensor #%d\n" + "Dynamic tensors are not supported", + current_operator_index_, OpNameFromRegistration(registration), + current_subgraph_index_, dynamic_tensor_index); + return kTfLiteError; + } + allocator_->FinishPrepareNodeAllocations( /*node_id=*/current_operator_index_); } @@ -205,6 +247,7 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) { subgraph_idx, subgraphs_->size()); return kTfLiteError; } + TfLiteStatus invoke_status = kTfLiteOk; uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); for (current_operator_index_ = 0; current_operator_index_ < operators_size; ++current_operator_index_) { @@ -226,7 +269,7 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) { #endif TFLITE_DCHECK(registration->invoke); - TfLiteStatus invoke_status = registration->invoke(context_, node); + 
invoke_status = registration->invoke(context_, node); #ifdef USE_TFLM_COMPRESSION GetMicroContext(context_)->ResetDecompressionMemoryAllocations(); #endif // USE_TFLM_COMPRESSION @@ -243,12 +286,15 @@ TfLiteStatus MicroInterpreterGraph::InvokeSubgraph(int subgraph_idx) { OpNameFromRegistration(registration), current_operator_index_, invoke_status); } - return invoke_status; + // make sure to restore subgraph and operator indices + break; } } + current_subgraph_index_ = previous_subgraph_idx; current_operator_index_ = previous_operator_idx; - return kTfLiteOk; + + return invoke_status; } TfLiteStatus MicroInterpreterGraph::ResetVariableTensors() { diff --git a/tensorflow/lite/micro/micro_interpreter_test.cc b/tensorflow/lite/micro/micro_interpreter_test.cc index eebdd39f12a..ee1592579a0 100644 --- a/tensorflow/lite/micro/micro_interpreter_test.cc +++ b/tensorflow/lite/micro/micro_interpreter_test.cc @@ -1,4 +1,4 @@ -/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2025 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -778,4 +778,35 @@ TF_LITE_MICRO_TEST(TestGetTensorFailsNoLinearMemoryPlanner) { TF_LITE_MICRO_EXPECT(interpreter.GetTensor(0) == nullptr); } +TF_LITE_MICRO_TEST(TestDynamicTensorFails) { + tflite::testing::TestingOpResolver op_resolver; + TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, + tflite::testing::GetTestingOpResolver(op_resolver)); + + constexpr size_t kAllocatorBufferSize = 2000; + uint8_t allocator_buffer[kAllocatorBufferSize]; + + // Use a new scope for each MicroInterpreter + { + // test with 0 in shape + const tflite::Model* model = + tflite::testing::GetNoOpModelWithTensorShape({3, 2, 0}); + TF_LITE_MICRO_EXPECT(nullptr != model); + tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer, + kAllocatorBufferSize); + TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError); + } + + // Use a new scope for each MicroInterpreter + { + // test with -1 in shape + const tflite::Model* model = + tflite::testing::GetNoOpModelWithTensorShape({3, 2, -1}); + TF_LITE_MICRO_EXPECT(nullptr != model); + tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer, + kAllocatorBufferSize); + TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError); + } +} + TF_LITE_MICRO_TESTS_END diff --git a/tensorflow/lite/micro/test_helpers.cc b/tensorflow/lite/micro/test_helpers.cc index ff786a7b0b1..3b55c50d564 100644 --- a/tensorflow/lite/micro/test_helpers.cc +++ b/tensorflow/lite/micro/test_helpers.cc @@ -103,13 +103,14 @@ class ModelBuilder { Operator RegisterOp(BuiltinOperator op, const char* custom_code); // Adds a tensor to the model. - Tensor AddTensor(TensorType type, std::initializer_list<int32_t> shape) { + Tensor AddTensor(TensorType type, + const std::initializer_list<int32_t> shape) { return AddTensorImpl(type, /* is_variable */ false, shape); } // Adds a variable tensor to the model. 
Tensor AddVariableTensor(TensorType type, - std::initializer_list<int32_t> shape) { + const std::initializer_list<int32_t> shape) { return AddTensorImpl(type, /* is_variable */ true, shape); } @@ -133,7 +134,7 @@ class ModelBuilder { private: // Adds a tensor to the model. Tensor AddTensorImpl(TensorType type, bool is_variable, - std::initializer_list<int32_t> shape); + const std::initializer_list<int32_t> shape); flatbuffers::FlatBufferBuilder* builder_; @@ -1546,6 +1547,23 @@ const Model* BuildSimpleMockModelWithNullInputsOutputs() { return model; } +const Model* BuildNoOpModelWithTensorShape( + const std::initializer_list<int32_t>& shape) { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); + + ModelBuilder model_builder(fb_builder); + + // build model with 2 tensor outputs, the first with shape [3, 2] + // and the second with the supplied shape. + const int op_id = model_builder.RegisterOp(BuiltinOperator_CUSTOM, "no_op"); + const int tensor_0 = model_builder.AddTensor(TensorType_INT8, {3, 2}); + const int tensor_1 = model_builder.AddTensor(TensorType_INT8, shape); + + model_builder.AddNode(op_id, {}, {tensor_0, tensor_1}, {}); + return model_builder.BuildModel({}, {tensor_0, tensor_1}); +} + } // namespace const TFLMRegistration* SimpleStatefulOp::getRegistration() { @@ -1912,6 +1930,12 @@ const Model* GetSimpleStatefulModel() { return model; } +const Model* GetNoOpModelWithTensorShape( + const std::initializer_list<int32_t>& shape) { + // don't cache the model as the tensor shape can be different on each call + return const_cast<Model*>(BuildNoOpModelWithTensorShape(shape)); +} + const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) { using flatbuffers::Offset; flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); diff --git a/tensorflow/lite/micro/test_helpers.h b/tensorflow/lite/micro/test_helpers.h index f7bb3791415..20514b4f480 100644 --- a/tensorflow/lite/micro/test_helpers.h +++ b/tensorflow/lite/micro/test_helpers.h @@ -19,6 +19,7 @@ limitations
under the License. #include <algorithm> #include <cstddef> #include <cstdint> +#include <initializer_list> #include <limits> #include <type_traits> @@ -190,6 +191,11 @@ const Model* GetModelWithIfAndSubgraphInputTensorOverlap(); // Returns a flatbuffer model with null subgraph/operator inputs and outputs. const Model* GetSimpleModelWithNullInputsAndOutputs(); +// Returns a flatbuffer model with no inputs and two outputs, the second +// of which has the supplied shape. +const Model* GetNoOpModelWithTensorShape( + const std::initializer_list<int32_t>& shape); + // Builds a one-dimensional flatbuffer tensor of the given size. const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false);