From 52cd43c0384788a7ca98672c6ca6fee657b07f27 Mon Sep 17 00:00:00 2001
From: Advait Jain
Date: Tue, 11 May 2021 11:42:48 -0700
Subject: [PATCH] Remove unused MicroInterpreter::tensor() API.

The implementation of this function always allocated a persistent buffer,
which meant that calling it repeatedly would unexpectedly fail by running
out of space in the arena (effectively a memory leak).

Additionally, it appears that the function was only being used to test the
ResetVariableTensors API.
---
 tensorflow/lite/micro/micro_interpreter.cc   | 12 ---
 tensorflow/lite/micro/micro_interpreter.h    | 12 ---
 .../lite/micro/micro_interpreter_test.cc     | 77 -------------------
 3 files changed, 101 deletions(-)

diff --git a/tensorflow/lite/micro/micro_interpreter.cc b/tensorflow/lite/micro/micro_interpreter.cc
index 3dc6611e1d8374..e05a35a4bc59c4 100644
--- a/tensorflow/lite/micro/micro_interpreter.cc
+++ b/tensorflow/lite/micro/micro_interpreter.cc
@@ -305,18 +305,6 @@ TfLiteTensor* MicroInterpreter::output(size_t index) {
   return output_tensors_[index];
 }
 
-TfLiteTensor* MicroInterpreter::tensor(size_t index) {
-  const size_t length = tensors_size();
-  if (index >= length) {
-    TF_LITE_REPORT_ERROR(error_reporter_,
-                         "Tensor index %d out of range (length is %d)", index,
-                         length);
-    return nullptr;
-  }
-  return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_,
-                                                   index);
-}
-
 TfLiteStatus MicroInterpreter::ResetVariableTensors() {
   for (size_t i = 0; i < subgraph_->tensors()->size(); ++i) {
     auto* tensor = subgraph_->tensors()->Get(i);
diff --git a/tensorflow/lite/micro/micro_interpreter.h b/tensorflow/lite/micro/micro_interpreter.h
index 7da4c0b85dcaa7..d34015a0155195 100644
--- a/tensorflow/lite/micro/micro_interpreter.h
+++ b/tensorflow/lite/micro/micro_interpreter.h
@@ -68,18 +68,6 @@ class MicroInterpreter {
   // TODO(b/149795762): Add this to the TfLiteStatus enum.
   TfLiteStatus Invoke();
 
-  size_t tensors_size() const { return context_.tensors_size; }
-  TfLiteTensor* tensor(size_t tensor_index);
-  template <class T>
-  T* typed_tensor(int tensor_index) {
-    if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
-      if (tensor_ptr->type == typeToTfLiteType<T>()) {
-        return GetTensorData<T>(tensor_ptr);
-      }
-    }
-    return nullptr;
-  }
-
   TfLiteTensor* input(size_t index);
   size_t inputs_size() const { return subgraph_->inputs()->Length(); }
   const flatbuffers::Vector<int32_t>& inputs() const {
diff --git a/tensorflow/lite/micro/micro_interpreter_test.cc b/tensorflow/lite/micro/micro_interpreter_test.cc
index 5b775f67f3618e..c0f8667a29d931 100644
--- a/tensorflow/lite/micro/micro_interpreter_test.cc
+++ b/tensorflow/lite/micro/micro_interpreter_test.cc
@@ -74,7 +74,6 @@ TF_LITE_MICRO_TEST(TestInterpreter) {
   TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100);
   TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
   TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(2), interpreter.outputs_size());
-  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), interpreter.tensors_size());
 
   TfLiteTensor* input = interpreter.input(0);
   TF_LITE_MICRO_EXPECT_NE(nullptr, input);
@@ -251,81 +250,6 @@ TF_LITE_MICRO_TEST(TestKernelMemoryPlanning) {
   }
 }
 
-TF_LITE_MICRO_TEST(TestVariableTensorReset) {
-  const tflite::Model* model = tflite::testing::GetComplexMockModel();
-  TF_LITE_MICRO_EXPECT_NE(nullptr, model);
-
-  tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();
-
-  constexpr size_t allocator_buffer_size =
-      3072 /* optimal arena size at the time of writing */ +
-      16 /* alignment */ + 100 /* some headroom */;
-  uint8_t allocator_buffer[allocator_buffer_size];
-  tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
-                                       allocator_buffer_size,
-                                       tflite::GetMicroErrorReporter());
-  TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
-  TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 2096 + 100);
-  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
-  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size());
-
-  // Assign hard-coded values:
-  for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
-    TfLiteTensor* cur_tensor = interpreter.tensor(i);
-    int buffer_length = tflite::ElementCount(*cur_tensor->dims);
-    // Assign all buffers to non-zero values. Variable tensors are assigned 2
-    // here so we can verify that they have been reset after the API call.
-    int buffer_value = cur_tensor->is_variable ? 2 : 1;
-    switch (cur_tensor->type) {
-      case kTfLiteInt32: {
-        int32_t* buffer = tflite::GetTensorData<int32_t>(cur_tensor);
-        for (int j = 0; j < buffer_length; ++j) {
-          buffer[j] = static_cast<int32_t>(buffer_value);
-        }
-        break;
-      }
-      case kTfLiteUInt8: {
-        uint8_t* buffer = tflite::GetTensorData<uint8_t>(cur_tensor);
-        for (int j = 0; j < buffer_length; ++j) {
-          buffer[j] = static_cast<uint8_t>(buffer_value);
-        }
-        break;
-      }
-      default:
-        TF_LITE_MICRO_FAIL("Unsupported dtype");
-    }
-  }
-
-  interpreter.ResetVariableTensors();
-
-  // Ensure only variable tensors have been reset to zero:
-  for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
-    TfLiteTensor* cur_tensor = interpreter.tensor(i);
-    int buffer_length = tflite::ElementCount(*cur_tensor->dims);
-    // Variable tensors should be zero (not the value assigned in the loop
-    // above).
-    int buffer_value = cur_tensor->is_variable ? 0 : 1;
-    switch (cur_tensor->type) {
-      case kTfLiteInt32: {
-        int32_t* buffer = tflite::GetTensorData<int32_t>(cur_tensor);
-        for (int j = 0; j < buffer_length; ++j) {
-          TF_LITE_MICRO_EXPECT_EQ(buffer_value, buffer[j]);
-        }
-        break;
-      }
-      case kTfLiteUInt8: {
-        uint8_t* buffer = tflite::GetTensorData<uint8_t>(cur_tensor);
-        for (int j = 0; j < buffer_length; ++j) {
-          TF_LITE_MICRO_EXPECT_EQ(buffer_value, buffer[j]);
-        }
-        break;
-      }
-      default:
-        TF_LITE_MICRO_FAIL("Unsupported dtype");
-    }
-  }
-}
-
 // The interpreter initialization requires multiple steps and this test case
 // ensures that simply creating and destructing an interpreter object is ok.
 // b/147830765 has one example of a change that caused trouble for this simple
@@ -508,7 +432,6 @@ TF_LITE_MICRO_TEST(TestInterpreterMultipleInputs) {
 
   TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(3), interpreter.inputs_size());
   TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size());
-  TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), interpreter.tensors_size());
 
   TfLiteTensor* input = interpreter.input(0);
   TF_LITE_MICRO_EXPECT_NE(nullptr, input);
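
To make the failure mode concrete, here is a minimal sketch of the leak
described in the commit message. It is not part of the patch: the helper name
and the arena size are illustrative, the model and op_resolver are assumed to
be set up as in micro_interpreter_test.cc, and it targets the pre-patch API
that still has tensor().

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"

// Hypothetical helper demonstrating why repeated tensor() calls exhausted
// the arena.
void DemonstrateTensorLeak(const tflite::Model* model,
                           const tflite::AllOpsResolver& op_resolver) {
  constexpr size_t kArenaSize = 2048;  // assumed size for illustration
  static uint8_t arena[kArenaSize];
  tflite::MicroInterpreter interpreter(model, op_resolver, arena, kArenaSize,
                                       tflite::GetMicroErrorReporter());
  if (interpreter.AllocateTensors() != kTfLiteOk) return;

  // Each call to tensor() went through AllocatePersistentTfLiteTensor() and
  // handed back a fresh TfLiteTensor struct from the arena rather than a
  // cached one, so arena usage grew on every iteration ...
  for (int i = 0; i < 1000; ++i) {
    TfLiteTensor* t = interpreter.tensor(0);
    // ... until the arena was exhausted and the allocation failed, at which
    // point tensor() returned nullptr even for a valid index.
    if (t == nullptr) break;
  }

  // input() and output() avoid this problem: their TfLiteTensor structs are
  // allocated once and cached (see output_tensors_ in the first hunk above).
  TfLiteTensor* input = interpreter.input(0);
  (void)input;
}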