Add support for offline planning of variable tensors
Co-authored-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Co-authored-by: Måns Nilsson <mans.nilsson@arm.com>
fresve-arm authored and mansnils committed Jan 20, 2023
1 parent e50c14f commit 3c227d3
Showing 5 changed files with 45 additions and 33 deletions.
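
In short, the commit lets the offline memory planner place variable tensors: a variable tensor that has an offline offset is now marked needs_allocating, so the static memory plan positions it in the arena, while variable tensors left to the online planner (or models with no offline plan at all) still get a dedicated persistent buffer from AllocateVariables. Below is a minimal sketch of that decision rule, written for this summary rather than taken from the TFLM sources; the sentinel value assigned to kOnlinePlannedBuffer here is an assumption.

#include <cstddef>
#include <cstdint>

// Assumed sentinel meaning "no offline offset; let the online planner decide".
constexpr int32_t kOnlinePlannedBuffer = -1;

// Simplified placement decision for one variable tensor under the new scheme.
enum class VariablePlacement {
  kPersistentBuffer,    // pre-allocated from the persistent arena, as before
  kOfflinePlannedArena  // planned into the arena at the offline offset
};

inline VariablePlacement PlaceVariableTensor(
    const int32_t* offline_planner_offsets, size_t tensor_index) {
  // No offline plan at all, or this tensor was left to the online planner:
  // keep the pre-commit behavior and give it a persistent buffer.
  if (offline_planner_offsets == nullptr ||
      offline_planner_offsets[tensor_index] == kOnlinePlannedBuffer) {
    return VariablePlacement::kPersistentBuffer;
  }
  // Otherwise the tensor is marked needs_allocating so the arena planner
  // honours the offline offset.
  return VariablePlacement::kOfflinePlannedArena;
}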
6 changes: 5 additions & 1 deletion tensorflow/lite/micro/micro_allocation_info.cc
@@ -1,4 +1,4 @@
-/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -204,6 +204,10 @@ TfLiteStatus AllocationInfoBuilder::InitializeAllocationInfo(
                                 (current->bytes != 0);
     if (offline_offsets) {
       current->offline_offset = offline_offsets[i];
+      if (subgraph->tensors()->Get(i)->is_variable() &&
+          current->offline_offset != kOnlinePlannedBuffer) {
+        current->needs_allocating = true;
+      }
     } else {
       current->offline_offset = kOnlinePlannedBuffer;
     }
49 changes: 27 additions & 22 deletions tensorflow/lite/micro/micro_allocator.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -490,15 +490,6 @@ TfLiteStatus MicroAllocator::FinishModelAllocation(
   TF_LITE_ENSURE_STATUS(AllocateScratchBufferHandles(
       scratch_buffer_handles, scratch_buffer_request_count_));
 
-  // Allocate buffers for variable tensors.
-  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
-       subgraph_idx++) {
-    const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
-    TFLITE_DCHECK(subgraph != nullptr);
-    TF_LITE_ENSURE_STATUS(AllocateVariables(
-        subgraph, subgraph_allocations[subgraph_idx].tensors));
-  }
-
   // Plan all subgraphs and scratch buffers together.
   TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph_allocations,
                                                *scratch_buffer_handles));
@@ -754,23 +745,27 @@ TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors(
   return kTfLiteOk;
 }
 
-TfLiteStatus MicroAllocator::AllocateVariables(const SubGraph* subgraph,
-                                               TfLiteEvalTensor* eval_tensors) {
+TfLiteStatus MicroAllocator::AllocateVariables(
+    const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors,
+    const int32_t* offline_planner_offsets) {
   for (size_t i = 0; i < subgraph->tensors()->size(); ++i) {
     auto* tensor = subgraph->tensors()->Get(i);
     if (tensor->is_variable()) {
-      size_t buffer_size;
-      TF_LITE_ENSURE_STATUS(
-          TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size));
+      if (offline_planner_offsets == nullptr ||
+          offline_planner_offsets[i] == kOnlinePlannedBuffer) {
+        size_t buffer_size;
+        TF_LITE_ENSURE_STATUS(
+            TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size));
 
-      eval_tensors[i].data.data =
-          persistent_buffer_allocator_->AllocatePersistentBuffer(
-              buffer_size, MicroArenaBufferAlignment());
+        eval_tensors[i].data.data =
+            persistent_buffer_allocator_->AllocatePersistentBuffer(
+                buffer_size, MicroArenaBufferAlignment());
 
-      if (eval_tensors[i].data.data == nullptr) {
-        MicroPrintf("Failed to allocate variable tensor of size %d",
-                    buffer_size);
-        return kTfLiteError;
+        if (eval_tensors[i].data.data == nullptr) {
+          MicroPrintf("Failed to allocate variable tensor of size %d",
+                      buffer_size);
+          return kTfLiteError;
+        }
      }
    }
  }
@@ -819,6 +814,16 @@ TfLiteStatus MicroAllocator::CommitStaticMemoryPlan(
   const int32_t* offline_planner_offsets = nullptr;
   TF_LITE_ENSURE_STATUS(
       builder.GetOfflinePlannedOffsets(&offline_planner_offsets));
+
+  // Allocate buffers for variable tensors.
+  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
+       subgraph_idx++) {
+    const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
+    TFLITE_DCHECK(subgraph != nullptr);
+    TF_LITE_ENSURE_STATUS(AllocateVariables(
+        subgraph, allocations[subgraph_idx].tensors, offline_planner_offsets));
+  }
 
   TF_LITE_ENSURE_STATUS(
       builder.InitializeAllocationInfo(offline_planner_offsets, allocations));
7 changes: 4 additions & 3 deletions tensorflow/lite/micro/micro_allocator.h
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -248,8 +248,9 @@ class MicroAllocator {
   virtual TfLiteStatus AllocateTfLiteEvalTensors(
       const Model* model, SubgraphAllocations* subgraph_allocations);
   // Allocates persistent tensor buffers for variable tensors in the subgraph.
-  virtual TfLiteStatus AllocateVariables(const SubGraph* subgraph,
-                                         TfLiteEvalTensor* eval_tensors);
+  virtual TfLiteStatus AllocateVariables(
+      const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors,
+      const int32_t* offline_planner_offsets = nullptr);
 
   // Allocate and return a persistent TfLiteTensor.
   // TODO(b/162311891): Drop this method when the interpreter has an API for
9 changes: 5 additions & 4 deletions tensorflow/lite/micro/recording_micro_allocator.cc
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -192,11 +192,12 @@ TfLiteStatus RecordingMicroAllocator::AllocateTfLiteEvalTensors(
 }
 
 TfLiteStatus RecordingMicroAllocator::AllocateVariables(
-    const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) {
+    const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors,
+    const int32_t* offline_planner_offsets) {
   RecordedAllocation allocations = SnapshotAllocationUsage();
 
-  TfLiteStatus status =
-      MicroAllocator::AllocateVariables(subgraph, eval_tensors);
+  TfLiteStatus status = MicroAllocator::AllocateVariables(
+      subgraph, eval_tensors, offline_planner_offsets);
 
   RecordAllocationUsage(allocations,
                         recorded_tflite_tensor_variable_buffer_data_);
7 changes: 4 additions & 3 deletions tensorflow/lite/micro/recording_micro_allocator.h
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -77,8 +77,9 @@ class RecordingMicroAllocator : public MicroAllocator {
       const Model* model, SubgraphAllocations* subgraph_allocations) override;
   TfLiteStatus AllocateTfLiteEvalTensors(
       const Model* model, SubgraphAllocations* subgraph_allocations) override;
-  TfLiteStatus AllocateVariables(const SubGraph* subgraph,
-                                 TfLiteEvalTensor* eval_tensors) override;
+  TfLiteStatus AllocateVariables(
+      const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors,
+      const int32_t* offline_planner_offsets = nullptr) override;
   // TODO(b/162311891): Once all kernels have been updated to the new API drop
   // this method. It is only used to record TfLiteTensor persistent allocations.
   TfLiteTensor* AllocatePersistentTfLiteTensorInternal() override;
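
Note that both header declarations above default offline_planner_offsets to nullptr, so existing callers that never produce an offline plan keep the previous behavior and every variable tensor still gets its own persistent buffer. The hypothetical, self-contained example below (not part of the commit; the sentinel value is an assumption) walks through how the two cases fall out:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr int32_t kOnlinePlannedBuffer = -1;  // assumed sentinel value

int main() {
  // Hypothetical offline plan for three variable tensors: the first two are
  // pinned to fixed arena offsets, the third is left to the online planner.
  const int32_t offline_offsets[] = {0, 256, kOnlinePlannedBuffer};
  for (size_t i = 0; i < 3; ++i) {
    if (offline_offsets[i] == kOnlinePlannedBuffer) {
      // Same as before the commit: a dedicated persistent buffer.
      std::printf("tensor %zu: persistent variable buffer\n", i);
    } else {
      // New behavior: marked needs_allocating and placed by the static
      // memory planner at the offline offset.
      std::printf("tensor %zu: arena offset %d\n", i,
                  static_cast<int>(offline_offsets[i]));
    }
  }
  return 0;
}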
