[tflite] Ensure input tensors don't have nullptr buffers.
A crafted TFLite model can force a node to have as input a tensor backed by a `nullptr` buffer: by carefully changing the buffer index in the flatbuffer serialization, we can make the TFLite interpreter treat a read-only tensor as a read-write one and assume that some operator produces this tensor as output, writing to it and allocating memory for it before the tensor is used as input. If no such operator exists, the input is read through a null pointer, resulting in memory corruption.
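For context, here is a minimal sketch (not part of this commit, written against the public TFLite C++ API) of how the failure surfaces to an application after the fix. "crafted_model.bin" is a hypothetical stand-in for a malformed model such as the `segment_sum_invalid_buffer.bin` test file added below.

// Minimal sketch, not part of this commit. "crafted_model.bin" is a
// hypothetical stand-in for a malformed model such as
// testdata/segment_sum_invalid_buffer.bin.
#include <cstdio>
#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

int main() {
  std::unique_ptr<tflite::FlatBufferModel> model =
      tflite::FlatBufferModel::BuildFromFile("crafted_model.bin");
  if (model == nullptr) return 1;

  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk ||
      interpreter == nullptr) {
    return 1;
  }

  // Allocation can still succeed: the crafted buffer index only makes the
  // tensor look read-write, so its allocation is deferred to a producing
  // operator that never runs.
  if (interpreter->AllocateTensors() != kTfLiteOk) return 1;

  // Before this commit, Invoke() could dereference the tensor's nullptr
  // buffer; with the new check it returns kTfLiteError instead.
  if (interpreter->Invoke() != kTfLiteOk) {
    std::fprintf(stderr, "interpreter rejected the malformed model\n");
    return 1;
  }
  return 0;
}

Note that `AllocateTensors()` can still succeed for such a model, since the bad buffer index only makes the tensor look read-write and defers its allocation; the new check in `Invoke()` is therefore the earliest place the error can be reported.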

PiperOrigin-RevId: 332524692
Change-Id: I57ef175152a29020af9ab041dc959e5631dce40f
mihaimaruseac authored and tensorflower-gardener committed Sep 18, 2020
1 parent 00a5ef6 commit 0b5662b
Showing 4 changed files with 58 additions and 18 deletions.
2 changes: 2 additions & 0 deletions tensorflow/lite/BUILD
@@ -242,6 +242,7 @@ cc_library(
":arena_planner",
":external_cpu_backend_context",
":graph_info",
":kernel_api",
":memory_planner",
":minimal_logging",
":shared_library",
@@ -469,6 +470,7 @@ cc_test(
"testdata/add_shared_tensors.bin",
"testdata/empty_model.bin",
"testdata/multi_add_flex.bin",
"testdata/segment_sum_invalid_buffer.bin",
"testdata/sparse_tensor.bin",
"testdata/test_min_runtime.bin",
"testdata/test_model.bin",
14 changes: 14 additions & 0 deletions tensorflow/lite/core/subgraph.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <cstdint>

#include "tensorflow/lite/arena_planner.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
@@ -1030,6 +1031,19 @@ TfLiteStatus Subgraph::Invoke() {
tensor->data_is_stale) {
TF_LITE_ENSURE_STATUS(EnsureTensorDataIsReadable(tensor_index));
}
if (tensor->data.raw == nullptr && tensor->bytes > 0) {
if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1) {
// In general, having a tensor here with no buffer will be an error.
// However, for the reshape operator, the second input tensor is only
// used for the shape, not for the data. Thus, null buffer is ok.
continue;
} else {
// In all other cases, we need to return an error as otherwise we will
// trigger a null pointer dereference (likely).
ReportError("Input tensor %d lacks data", tensor_index);
return kTfLiteError;
}
}
}

if (check_cancelled_func_ != nullptr &&
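To make the intent of the new guard easier to scan, it can be restated as a standalone predicate. This is a hypothetical helper for illustration only; the name does not exist in TFLite. The `bytes > 0` part of the hunk above presumably exists because zero-sized tensors need no backing buffer, so only non-empty inputs with a null buffer are rejected.

// Hypothetical restatement of the guard added to Subgraph::Invoke(); the
// function name is illustrative and not part of the commit.
bool MustRejectInputTensor(const TfLiteRegistration& registration,
                           const TfLiteTensor& tensor, int input_index) {
  // A tensor with data, or an empty tensor, needs no further checking.
  if (tensor.data.raw != nullptr || tensor.bytes == 0) return false;
  // RESHAPE reads only the shape of its second input, never its data, so a
  // null buffer is acceptable in that one position.
  if (registration.builtin_code == kTfLiteBuiltinReshape && input_index == 1) {
    return false;
  }
  // Any other non-empty input without a backing buffer means the model is
  // malformed and evaluating it would dereference a null pointer.
  return true;
}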
60 changes: 42 additions & 18 deletions tensorflow/lite/model_test.cc
@@ -438,24 +438,48 @@ TEST(BasicFlatBufferModel, TestParseModelWithSparseTensor) {
}

// TODO(b/150072943): Add malformed model with sparse tensor tests.
TEST(BasicFlatBufferModel, TestHandleMalformedModel) {
const auto model_paths = {
// These models use the same tensor as both input and output of a node
"tensorflow/lite/testdata/add_shared_tensors.bin",
};

for (const auto& model_path : model_paths) {
std::unique_ptr<tflite::FlatBufferModel> model =
FlatBufferModel::BuildFromFile(model_path);
ASSERT_NE(model, nullptr);

tflite::ops::builtin::BuiltinOpResolver resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
ASSERT_NE(interpreter->AllocateTensors(), kTfLiteOk);
  }
}

// The model used here has at least one node that uses the same tensor as both
// input and output. This causes a segfault when the operator is evaluated,
// hence we want to reject such models. The earliest place we can check this is
// in `AllocateTensors`, so the test verifies that
// `interpreter->AllocateTensors` detects these bad models.
TEST(BasicFlatBufferModel, TestHandleMalformedModelReuseTensor) {
const auto model_path =
"tensorflow/lite/testdata/add_shared_tensors.bin";

std::unique_ptr<tflite::FlatBufferModel> model =
FlatBufferModel::BuildFromFile(model_path);
ASSERT_NE(model, nullptr);

tflite::ops::builtin::BuiltinOpResolver resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
ASSERT_NE(interpreter->AllocateTensors(), kTfLiteOk);
}

// The model used here has a tensor's buffer index pointing to a null buffer.
// This makes the interpreter treat the tensor as read-write even though the
// model assumes it is read-only, so `interpreter->Invoke()` would segfault
// without a precondition check. The test verifies that this precondition
// check exists.
TEST(BasicFlatBufferModel, TestHandleMalformedModelInvalidBuffer) {
const auto model_path =
"tensorflow/lite/testdata/segment_sum_invalid_buffer.bin";

std::unique_ptr<tflite::FlatBufferModel> model =
FlatBufferModel::BuildFromFile(model_path);
ASSERT_NE(model, nullptr);

tflite::ops::builtin::BuiltinOpResolver resolver;
InterpreterBuilder builder(*model, resolver);
std::unique_ptr<Interpreter> interpreter;
ASSERT_EQ(builder(&interpreter), kTfLiteOk);
ASSERT_NE(interpreter, nullptr);
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
ASSERT_NE(interpreter->Invoke(), kTfLiteOk);
}

// TODO(aselle): Add tests for serialization of builtin op data types.
tensorflow/lite/testdata/segment_sum_invalid_buffer.bin
Binary file not shown.
