Remove unused MicroInterpreter::tensor() API. #49114

Merged · 1 commit · May 11, 2021
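
Migration note (added context, not wording from this PR): per the title the tensor() API was unused, and the only other edits here remove tensors_size(), typed_tensor<T>(), and the tests that exercised them. Graph I/O goes through the input()/output() accessors that remain. A minimal sketch of the surviving flow, assuming the same constructor arguments the tests below use; kArenaSize and RunOnce are illustrative names, not part of the PR:

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"

constexpr size_t kArenaSize = 2048;  // model-dependent; tune via arena_used_bytes()
uint8_t g_arena[kArenaSize];

TfLiteStatus RunOnce(const tflite::Model* model) {
  tflite::AllOpsResolver resolver;
  tflite::MicroInterpreter interpreter(model, resolver, g_arena, kArenaSize,
                                       tflite::GetMicroErrorReporter());
  if (interpreter.AllocateTensors() != kTfLiteOk) return kTfLiteError;

  TfLiteTensor* input = interpreter.input(0);  // previously also reachable via tensor(i)
  if (input == nullptr) return kTfLiteError;
  // ... fill input->data here ...

  if (interpreter.Invoke() != kTfLiteOk) return kTfLiteError;

  TfLiteTensor* output = interpreter.output(0);
  return output != nullptr ? kTfLiteOk : kTfLiteError;
}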
12 changes: 0 additions & 12 deletions tensorflow/lite/micro/micro_interpreter.cc
@@ -305,18 +305,6 @@ TfLiteTensor* MicroInterpreter::output(size_t index) {
return output_tensors_[index];
}

TfLiteTensor* MicroInterpreter::tensor(size_t index) {
const size_t length = tensors_size();
if (index >= length) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Tensor index %d out of range (length is %d)", index,
length);
return nullptr;
}
return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_,
index);
}

TfLiteStatus MicroInterpreter::ResetVariableTensors() {
for (size_t i = 0; i < subgraph_->tensors()->size(); ++i) {
auto* tensor = subgraph_->tensors()->Get(i);
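Context for the deletion (my reading of the removed body above, not text from the PR): every tensor() call went through allocator_.AllocatePersistentTfLiteTensor(), so each lookup carved a fresh TfLiteTensor out of the persistent arena rather than reusing one. A sweep over all tensors, like the removed tests performed, therefore cost arena memory on every iteration:

// Old-API pattern; no longer compiles after this PR. Each tensor() call
// performed a persistent arena allocation, so arena usage grew with the
// loop instead of staying constant.
for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
  TfLiteTensor* t = interpreter.tensor(i);  // fresh persistent TfLiteTensor
  if (t == nullptr) break;                  // out-of-range index
  // ... inspect t ...
}
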
12 changes: 0 additions & 12 deletions tensorflow/lite/micro/micro_interpreter.h
@@ -68,18 +68,6 @@ class MicroInterpreter {
// TODO(b/149795762): Add this to the TfLiteStatus enum.
TfLiteStatus Invoke();

size_t tensors_size() const { return context_.tensors_size; }
TfLiteTensor* tensor(size_t tensor_index);
template <class T>
T* typed_tensor(int tensor_index) {
if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
if (tensor_ptr->type == typeToTfLiteType<T>()) {
return GetTensorData<T>(tensor_ptr);
}
}
return nullptr;
}

TfLiteTensor* input(size_t index);
size_t inputs_size() const { return subgraph_->inputs()->Length(); }
const flatbuffers::Vector<int32_t>& inputs() const {
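The header hunk above also drops the typed_tensor<T>() convenience template. Below is a minimal sketch of equivalent type-checked access rebuilt on the surviving input() accessor; the TypedInput helper name is mine, not a TFLM API, and output() works the same way:

#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"  // tflite::GetTensorData
#include "tensorflow/lite/type_to_tflitetype.h"  // tflite::typeToTfLiteType

// Hypothetical helper mirroring the removed typed_tensor<T>() body.
template <class T>
T* TypedInput(tflite::MicroInterpreter& interpreter, size_t index) {
  if (TfLiteTensor* tensor_ptr = interpreter.input(index)) {
    if (tensor_ptr->type == tflite::typeToTfLiteType<T>()) {
      return tflite::GetTensorData<T>(tensor_ptr);
    }
  }
  return nullptr;  // bad index or type mismatch
}

// Usage: int32_t* in0 = TypedInput<int32_t>(interpreter, 0);
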
77 changes: 0 additions & 77 deletions tensorflow/lite/micro/micro_interpreter_test.cc
@@ -74,7 +74,6 @@ TF_LITE_MICRO_TEST(TestInterpreter) {
TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 928 + 100);
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(2), interpreter.outputs_size());
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), interpreter.tensors_size());

TfLiteTensor* input = interpreter.input(0);
TF_LITE_MICRO_EXPECT_NE(nullptr, input);
@@ -251,81 +250,6 @@ TF_LITE_MICRO_TEST(TestKernelMemoryPlanning) {
}
}

TF_LITE_MICRO_TEST(TestVariableTensorReset) {
const tflite::Model* model = tflite::testing::GetComplexMockModel();
TF_LITE_MICRO_EXPECT_NE(nullptr, model);

tflite::AllOpsResolver op_resolver = tflite::testing::GetOpResolver();

constexpr size_t allocator_buffer_size =
3072 /* optimal arena size at the time of writing. */ +
16 /* alignment */ + 100 /* some headroom */;
uint8_t allocator_buffer[allocator_buffer_size];
tflite::MicroInterpreter interpreter(model, op_resolver, allocator_buffer,
allocator_buffer_size,
tflite::GetMicroErrorReporter());
TF_LITE_MICRO_EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TF_LITE_MICRO_EXPECT_LE(interpreter.arena_used_bytes(), 2096 + 100);
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.inputs_size());
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size());

// Assign hard-coded values:
for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
TfLiteTensor* cur_tensor = interpreter.tensor(i);
int buffer_length = tflite::ElementCount(*cur_tensor->dims);
// Assign all buffers to non-zero values. Variable tensors will be assigned
// 2 here and will be verified to have been reset after the API call.
int buffer_value = cur_tensor->is_variable ? 2 : 1;
switch (cur_tensor->type) {
case kTfLiteInt32: {
int32_t* buffer = tflite::GetTensorData<int32_t>(cur_tensor);
for (int j = 0; j < buffer_length; ++j) {
buffer[j] = static_cast<int32_t>(buffer_value);
}
break;
}
case kTfLiteUInt8: {
uint8_t* buffer = tflite::GetTensorData<uint8_t>(cur_tensor);
for (int j = 0; j < buffer_length; ++j) {
buffer[j] = static_cast<uint8_t>(buffer_value);
}
break;
}
default:
TF_LITE_MICRO_FAIL("Unsupported dtype");
}
}

interpreter.ResetVariableTensors();

// Ensure only variable tensors have been reset to zero:
for (size_t i = 0; i < interpreter.tensors_size(); ++i) {
TfLiteTensor* cur_tensor = interpreter.tensor(i);
int buffer_length = tflite::ElementCount(*cur_tensor->dims);
// Variable tensors should be zero (not the value assigned in the for loop
// above).
int buffer_value = cur_tensor->is_variable ? 0 : 1;
switch (cur_tensor->type) {
case kTfLiteInt32: {
int32_t* buffer = tflite::GetTensorData<int32_t>(cur_tensor);
for (int j = 0; j < buffer_length; ++j) {
TF_LITE_MICRO_EXPECT_EQ(buffer_value, buffer[j]);
}
break;
}
case kTfLiteUInt8: {
uint8_t* buffer = tflite::GetTensorData<uint8_t>(cur_tensor);
for (int j = 0; j < buffer_length; ++j) {
TF_LITE_MICRO_EXPECT_EQ(buffer_value, buffer[j]);
}
break;
}
default:
TF_LITE_MICRO_FAIL("Unsupported dtype");
}
}
}
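
// Note: ResetVariableTensors() itself survives this PR (see the
// micro_interpreter.cc hunk above); only this tensor()-based verification
// loop is removed, since tensors can no longer be enumerated by index
// through a public accessor.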

// The interpreter initialization requires multiple steps and this test case
// ensures that simply creating and destructing an interpreter object is ok.
// b/147830765 has one example of a change that caused trouble for this simple
@@ -508,7 +432,6 @@ TF_LITE_MICRO_TEST(TestInterpreterMultipleInputs) {

TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(3), interpreter.inputs_size());
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(1), interpreter.outputs_size());
TF_LITE_MICRO_EXPECT_EQ(static_cast<size_t>(4), interpreter.tensors_size());

TfLiteTensor* input = interpreter.input(0);
TF_LITE_MICRO_EXPECT_NE(nullptr, input);