Skip to content

Commit

Permalink
IVGCVSW-7722 Add ArmNNSettings to Opaque Delegate
Browse files Browse the repository at this point in the history
* Fix the order in which options are read so that the backend is always read first, regardless of the order in which the options are given

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia87b5920c7cd79b3e66bb6e5779e2355b21a7ec6
  • Loading branch information
TeresaARM committed Oct 25, 2023
1 parent c9c2835 commit 3e4b608
Show file tree
Hide file tree
Showing 7 changed files with 153 additions and 55 deletions.
4 changes: 2 additions & 2 deletions delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
Expand Up @@ -18,7 +18,7 @@ namespace armnnDelegate
TEST_SUITE("ArmnnDelegate")
{

TEST_CASE ("ArmnnDelegate Registered")
TEST_CASE ("ArmnnDelegate_Registered")
{
using namespace tflite;
auto tfLiteInterpreter = std::make_unique<Interpreter>();
Expand Down Expand Up @@ -60,7 +60,7 @@ TEST_CASE ("ArmnnDelegate Registered")
CHECK(tfLiteInterpreter != nullptr);
}

TEST_CASE ("ArmnnDelegateOptimizerOptionsRegistered")
TEST_CASE ("ArmnnDelegate_OptimizerOptionsRegistered")
{
using namespace tflite;
auto tfLiteInterpreter = std::make_unique<Interpreter>();
Expand Down
46 changes: 27 additions & 19 deletions delegate/common/src/DelegateOptions.cpp
Expand Up @@ -146,38 +146,47 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
bool internalProfilingState = false;
armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;

// Process backends
bool GpuAccFound = false;
bool CpuAccFound = false;

for (size_t i = 0; i < num_options; ++i)
{
// Process backends
if (std::string(options_keys[i]) == std::string("backends"))
{
// The backend option is a comma separated string of backendIDs that needs to be split
std::vector<armnn::BackendId> backends;
char* dup = strdup(options_values[i]);
char* pch = std::strtok(dup, ",");
char *dup = strdup(options_values[i]);
char *pch = std::strtok(dup, ",");
while (pch != NULL)
{
backends.push_back(pch);
pch = strtok (NULL, ",");
pch = strtok(NULL, ",");
}
SetBackends(backends);
GpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "GpuAcc");
CpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "CpuAcc");
break;
}
// Process dynamic-backends-path
}

// Rest of options after knowing the backend
for (size_t i = 0; i < num_options; ++i)
{
if (std::string(options_keys[i]) == std::string("backends"))
{
continue;
}
// Process dynamic-backends-path
else if (std::string(options_keys[i]) == std::string("dynamic-backends-path"))
{
runtimeOptions.m_DynamicBackendsPath = std::string(options_values[i]);
}
// Process logging level
// Process logging level
else if (std::string(options_keys[i]) == std::string("logging-severity"))
{
SetLoggingSeverity(options_values[i]);
}
// Process GPU backend options
// Process GPU backend options
else if (std::string(options_keys[i]) == std::string("gpu-tuning-level"))
{
if (GpuAccFound)
Expand Down Expand Up @@ -266,7 +275,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
"WARNING: CachedNetworkFilePath is enabled, but no backends that accept this option are set.";
}
}
// Process GPU & CPU backend options
// Process GPU & CPU backend options
else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
{
if (GpuAccFound)
Expand All @@ -287,7 +296,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
"WARNING: Fastmath is enabled, but no backends that accept this option are set.";
}
}
// Process CPU backend options
// Process CPU backend options
else if (std::string(options_keys[i]) == std::string("number-of-threads"))
{
if (CpuAccFound)
Expand All @@ -303,17 +312,17 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
"WARNING: NumberOfThreads is enabled, but no backends that accept this option are set.";
}
}
// Process reduce-fp32-to-fp16 option
// Process reduce-fp32-to-fp16 option
else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
{
optimizerOptions.SetReduceFp32ToFp16(armnn::stringUtils::StringToBool(options_values[i]));
}
// Process debug-data
// Process debug-data
else if (std::string(options_keys[i]) == std::string("debug-data"))
{
optimizerOptions.SetDebugEnabled(armnn::stringUtils::StringToBool(options_values[i]));
}
// Infer output-shape
// Infer output-shape
else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
{
if (armnn::stringUtils::StringToBool(options_values[i]))
Expand All @@ -325,23 +334,23 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);
}
}
// Allow expanded dims
// Allow expanded dims
else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
{
optimizerOptions.SetAllowExpandedDims(armnn::stringUtils::StringToBool(options_values[i]));
}
// Process memory-import
// Process memory-import
else if (std::string(options_keys[i]) == std::string("memory-import"))
{
optimizerOptions.SetImportEnabled(armnn::stringUtils::StringToBool(options_values[i]));
}
// Process enable-internal-profiling
// Process enable-internal-profiling
else if (std::string(options_keys[i]) == std::string("enable-internal-profiling"))
{
internalProfilingState = *options_values[i] != '0';
optimizerOptions.SetProfilingEnabled(internalProfilingState);
}
// Process internal-profiling-detail
// Process internal-profiling-detail
else if (std::string(options_keys[i]) == std::string("internal-profiling-detail"))
{
uint32_t detailLevel = static_cast<uint32_t>(std::stoul(options_values[i]));
Expand All @@ -358,7 +367,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
break;
}
}
// Process enable-external-profiling
// Process enable-external-profiling
else if (std::string(options_keys[i]) == std::string("enable-external-profiling"))
{
runtimeOptions.m_ProfilingOptions.m_EnableProfiling = armnn::stringUtils::StringToBool(options_values[i]);
Expand Down Expand Up @@ -398,7 +407,6 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
{
SetSerializeToDot(options_values[i]);
}

// Process disable-tflite-runtime-fallback
else if (std::string(options_keys[i]) == std::string("disable-tflite-runtime-fallback"))
{
Expand Down
19 changes: 10 additions & 9 deletions delegate/opaque/include/armnn_delegate.hpp
Expand Up @@ -36,7 +36,7 @@ struct DelegateData
/// Forward declaration for functions initializing the ArmNN Delegate
::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault();

TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings);
TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options);

void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate);

Expand Down Expand Up @@ -96,16 +96,15 @@ using TfLiteOpaqueDelegatePtr = tflite::delegates::TfLiteDelegatePtr;
class ArmnnDelegatePlugin : public DelegatePluginInterface
{
public:
static std::unique_ptr<ArmnnDelegatePlugin> New(const tflite::TFLiteSettings& tflite_settings)
static std::unique_ptr<ArmnnDelegatePlugin> New(const tflite::TFLiteSettings& tfliteSettings)
{
return std::make_unique<ArmnnDelegatePlugin>(tflite_settings);
return std::make_unique<ArmnnDelegatePlugin>(tfliteSettings);
}

tflite::delegates::TfLiteDelegatePtr Create() override
{
// Use default settings until options have been enabled.
return tflite::delegates::TfLiteDelegatePtr(
TfLiteArmnnOpaqueDelegateCreate(nullptr), TfLiteArmnnOpaqueDelegateDelete);
return tflite::delegates::TfLiteDelegatePtr(TfLiteArmnnOpaqueDelegateCreate(m_delegateOptions),
TfLiteArmnnOpaqueDelegateDelete);
}

int GetDelegateErrno(TfLiteOpaqueDelegate* from_delegate) override
Expand All @@ -114,9 +113,11 @@ class ArmnnDelegatePlugin : public DelegatePluginInterface
}

explicit ArmnnDelegatePlugin(const tflite::TFLiteSettings& tfliteSettings)
{
// Use default settings until options have been enabled.
}
: m_delegateOptions(ParseArmNNSettings(&tfliteSettings))
{}

private:
armnnDelegate::DelegateOptions m_delegateOptions;
};

/// ArmnnSubgraph class where parsing the nodes to ArmNN format and creating the ArmNN Graph
Expand Down
6 changes: 1 addition & 5 deletions delegate/opaque/src/armnn_delegate.cpp
Expand Up @@ -258,12 +258,8 @@ TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate*
return status;
}

/// Creates an opaque TfLite delegate backed by ArmNN, configured with the
/// given DelegateOptions (backends, optimizer options, etc.).
///
/// @param options  Fully-parsed delegate options; taken by value because the
///                 ArmnnOpaqueDelegate keeps its own copy.
/// @return Pointer to the opaque delegate, to be released with
///         TfLiteArmnnOpaqueDelegateDelete.
TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options)
{
    // NOTE(review): the ArmnnOpaqueDelegate allocated here is presumably
    // reclaimed by TfLiteArmnnOpaqueDelegateDelete -- confirm, the deleter's
    // body is not visible in this chunk.
    auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
    return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
}
Expand Down
5 changes: 4 additions & 1 deletion delegate/opaque/src/armnn_external_delegate.cpp
Expand Up @@ -9,7 +9,10 @@ namespace {

/// External-delegate entry point: builds an ArmNN opaque delegate from the
/// settings blob handed over by the TfLite delegate registry.
///
/// @param tflite_settings  Pointer treated as a const tflite::TFLiteSettings*,
///                         whose ArmNNSettings member configures the delegate.
/// @return The created opaque delegate (caller releases it via the matching
///         delete function of the plugin).
TfLiteOpaqueDelegate* ArmNNDelegateCreateFunc(const void* tflite_settings)
{
    // Parse the ArmNN-specific settings out of the generic TFLiteSettings
    // before constructing the delegate, so options are applied at creation.
    armnnDelegate::DelegateOptions opt = armnnOpaqueDelegate::ParseArmNNSettings(
        static_cast<const tflite::TFLiteSettings*>(tflite_settings));

    auto delegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(opt);
    return delegate;
}

Expand Down
112 changes: 103 additions & 9 deletions delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
Expand Up @@ -8,12 +8,95 @@

#include <opaque/include/armnn_delegate.hpp>

#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"

namespace armnnOpaqueDelegate
{

TEST_SUITE("ArmnnOpaqueDelegate")
{

TEST_CASE ("ArmnnOpaqueDelegate_Registered")
{
    using namespace tflite;

    // Build a minimal graph: two fp32 inputs feeding one ADD node.
    auto interpreter = std::make_unique<Interpreter>();
    interpreter->AddTensors(3);
    interpreter->SetInputs({0, 1});
    interpreter->SetOutputs({2});

    interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
    interpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
    interpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());

    // Builtin op data is malloc'd; presumably freed by the interpreter once
    // the node is registered -- TODO confirm against TfLite ownership rules.
    auto* params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
    params->activation = kTfLiteActNone;
    params->pot_scale_int16 = false;

    tflite::ops::builtin::BuiltinOpResolver resolver;
    const TfLiteRegistration* addRegistration = resolver.FindOp(BuiltinOperator_ADD, 1);
    interpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, params, addRegistration);

    // Create the Armnn Delegate with explicit backends and backend options.
    std::vector<armnn::BackendId> backendIds = { armnn::Compute::CpuRef };
    std::vector<armnn::BackendOptions> backendOptions;
    backendOptions.emplace_back(
        armnn::BackendOptions{ "BackendName",
                               {
                                   { "Option1", 42 },
                                   { "Option2", true }
                               }}
    );

    armnnDelegate::DelegateOptions delegateOptions(backendIds, backendOptions);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete)>
        armnnDelegatePtr(armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions),
                         armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete);

    // Registering the delegate must succeed and leave the interpreter usable.
    auto result = interpreter->ModifyGraphWithDelegate(std::move(armnnDelegatePtr));
    CHECK(result == kTfLiteOk);
    CHECK(interpreter != nullptr);
}

TEST_CASE ("ArmnnOpaqueDelegate_OptimizerOptionsRegistered")
{
    using namespace tflite;

    // Build a minimal graph: two fp32 inputs feeding one ADD node.
    auto interpreter = std::make_unique<Interpreter>();
    interpreter->AddTensors(3);
    interpreter->SetInputs({0, 1});
    interpreter->SetOutputs({2});

    interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
    interpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
    interpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());

    // Builtin op data is malloc'd; presumably freed by the interpreter once
    // the node is registered -- TODO confirm against TfLite ownership rules.
    auto* params = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
    params->activation = kTfLiteActNone;
    params->pot_scale_int16 = false;

    tflite::ops::builtin::BuiltinOpResolver resolver;
    const TfLiteRegistration* addRegistration = resolver.FindOp(BuiltinOperator_ADD, 1);
    interpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, params, addRegistration);

    // Create the Armnn Delegate, this time driven by optimizer options.
    std::vector<armnn::BackendId> backendIds = { armnn::Compute::CpuRef };
    armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, true);

    armnnDelegate::DelegateOptions delegateOptions(backendIds, optimizerOptions);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete)>
        armnnDelegatePtr(armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions),
                         armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete);

    // Registering the delegate must succeed and leave the interpreter usable.
    auto result = interpreter->ModifyGraphWithDelegate(std::move(armnnDelegatePtr));
    CHECK(result == kTfLiteOk);
    CHECK(interpreter != nullptr);
}

TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")
{
// Check default options can be created
Expand All @@ -28,7 +111,7 @@ TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")
CHECK(builder);

// Check Opaque delegate created
auto opaqueDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(&options);
auto opaqueDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(options);
CHECK(opaqueDelegate);

// Check Opaque Delegate can be deleted
Expand All @@ -38,16 +121,27 @@ TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")

TEST_CASE ("DelegatePluginTest")
{
// Use default settings until options have been enabled.
flatbuffers::FlatBufferBuilder flatBufferBuilder;
tflite::TFLiteSettingsBuilder tfliteSettingsBuilder(flatBufferBuilder);
flatbuffers::Offset<tflite::TFLiteSettings> tfliteSettings = tfliteSettingsBuilder.Finish();
flatBufferBuilder.Finish(tfliteSettings);
const tflite::TFLiteSettings* settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
flatBufferBuilder.GetBufferPointer());
const char* backends = "CpuRef";
bool fastmath = false;
const char* additional_parameters = "allow-expanded-dims=true";

flatbuffers::FlatBufferBuilder flatbuffer_builder;
flatbuffers::Offset<tflite::ArmNNSettings>
armnn_settings_offset = tflite::CreateArmNNSettingsDirect(flatbuffer_builder,
backends,
fastmath,
additional_parameters);

tflite::TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
tflite_settings_builder.add_armnn_settings(armnn_settings_offset);
flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings_offset = tflite_settings_builder.Finish();
flatbuffer_builder.Finish(tflite_settings_offset);

const tflite::TFLiteSettings* tflite_settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
flatbuffer_builder.GetBufferPointer());

std::unique_ptr<tflite::delegates::DelegatePluginInterface> delegatePlugin =
tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *settings);
tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *tflite_settings);

// Plugin is created correctly using armnn_delegate name.
CHECK((delegatePlugin != nullptr));
Expand Down

0 comments on commit 3e4b608

Please sign in to comment.