author     Teresa Charlin <teresa.charlinreyes@arm.com>  2023-10-19 19:13:29 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>       2023-10-25 12:02:06 +0000
commit     3e4b60897bde2ad7ab5b730c7c5d727e41cc0eef (patch)
tree       20fe9535bc7ad775f7a42f3324c86e7671fd3a48
parent     c9c28351e6742732a6e8ad268ac93c36b94a8ee0 (diff)
download   armnn-3e4b60897bde2ad7ab5b730c7c5d727e41cc0eef.tar.gz
IVGCVSW-7722 Add ArmNNSettings to Opaque Delegate
* Fix the order in which options are read so that the backend is resolved first, independently of the order in which the options are given.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ia87b5920c7cd79b3e66bb6e5779e2355b21a7ec6
-rw-r--r--  delegate/classic/src/test/ArmnnClassicDelegateTest.cpp  |   4
-rw-r--r--  delegate/common/src/DelegateOptions.cpp                 |  46
-rw-r--r--  delegate/opaque/include/armnn_delegate.hpp              |  19
-rw-r--r--  delegate/opaque/src/armnn_delegate.cpp                  |   6
-rw-r--r--  delegate/opaque/src/armnn_external_delegate.cpp         |   5
-rw-r--r--  delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp    | 112
-rw-r--r--  delegate/opaque/src/test/DelegateTestInterpreter.cpp    |  16
7 files changed, 153 insertions(+), 55 deletions(-)
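
For context, the end-to-end flow this change enables is: build a TFLiteSettings flatbuffer carrying ArmNNSettings, parse it into DelegateOptions, and create the opaque delegate from those options. The following is a minimal sketch of that flow, modelled on the new DelegatePluginTest further down; the wrapping helper function and the exact schema header path are assumptions, while the ArmNN and flatbuffer calls themselves appear in this patch:

#include <opaque/include/armnn_delegate.hpp>   // TfLiteArmnnOpaqueDelegateCreate, ParseArmNNSettings
// plus the generated TFLiteSettings/ArmNNSettings flatbuffer header for this tree (path assumed)

// Hypothetical helper, for illustration only.
TfLiteOpaqueDelegate* CreateDelegateFromArmNNSettings()
{
    // Build a TFLiteSettings buffer that carries the ArmNN-specific settings.
    flatbuffers::FlatBufferBuilder builder;
    flatbuffers::Offset<tflite::ArmNNSettings> armnnSettings =
        tflite::CreateArmNNSettingsDirect(builder,
                                          "CpuRef",                     // backends
                                          false,                        // fastmath
                                          "allow-expanded-dims=true");  // additional parameters
    tflite::TFLiteSettingsBuilder settingsBuilder(builder);
    settingsBuilder.add_armnn_settings(armnnSettings);
    builder.Finish(settingsBuilder.Finish());

    const tflite::TFLiteSettings* settings =
        flatbuffers::GetRoot<tflite::TFLiteSettings>(builder.GetBufferPointer());

    // Parse the settings into DelegateOptions and create the opaque delegate from them.
    armnnDelegate::DelegateOptions options = armnnOpaqueDelegate::ParseArmNNSettings(settings);
    return armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(options);
}

The returned delegate is registered with the interpreter as in the tests below and released with TfLiteArmnnOpaqueDelegateDelete.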
diff --git a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
index e83f47f244..bdde301057 100644
--- a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
+++ b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
@@ -18,7 +18,7 @@ namespace armnnDelegate
TEST_SUITE("ArmnnDelegate")
{
-TEST_CASE ("ArmnnDelegate Registered")
+TEST_CASE ("ArmnnDelegate_Registered")
{
using namespace tflite;
auto tfLiteInterpreter = std::make_unique<Interpreter>();
@@ -60,7 +60,7 @@ TEST_CASE ("ArmnnDelegate Registered")
CHECK(tfLiteInterpreter != nullptr);
}
-TEST_CASE ("ArmnnDelegateOptimizerOptionsRegistered")
+TEST_CASE ("ArmnnDelegate_OptimizerOptionsRegistered")
{
using namespace tflite;
auto tfLiteInterpreter = std::make_unique<Interpreter>();
diff --git a/delegate/common/src/DelegateOptions.cpp b/delegate/common/src/DelegateOptions.cpp
index 4596159fce..3b839971d8 100644
--- a/delegate/common/src/DelegateOptions.cpp
+++ b/delegate/common/src/DelegateOptions.cpp
@@ -146,38 +146,47 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
bool internalProfilingState = false;
armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
+ // Process backends
bool GpuAccFound = false;
bool CpuAccFound = false;
-
for (size_t i = 0; i < num_options; ++i)
{
- // Process backends
if (std::string(options_keys[i]) == std::string("backends"))
{
// The backend option is a comma separated string of backendIDs that needs to be split
std::vector<armnn::BackendId> backends;
- char* dup = strdup(options_values[i]);
- char* pch = std::strtok(dup, ",");
+ char *dup = strdup(options_values[i]);
+ char *pch = std::strtok(dup, ",");
while (pch != NULL)
{
backends.push_back(pch);
- pch = strtok (NULL, ",");
+ pch = strtok(NULL, ",");
}
SetBackends(backends);
GpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "GpuAcc");
CpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "CpuAcc");
+ break;
}
- // Process dynamic-backends-path
+ }
+
+ // Rest of options after knowing the backend
+ for (size_t i = 0; i < num_options; ++i)
+ {
+ if (std::string(options_keys[i]) == std::string("backends"))
+ {
+ continue;
+ }
+ // Process dynamic-backends-path
else if (std::string(options_keys[i]) == std::string("dynamic-backends-path"))
{
runtimeOptions.m_DynamicBackendsPath = std::string(options_values[i]);
}
- // Process logging level
+ // Process logging level
else if (std::string(options_keys[i]) == std::string("logging-severity"))
{
SetLoggingSeverity(options_values[i]);
}
- // Process GPU backend options
+ // Process GPU backend options
else if (std::string(options_keys[i]) == std::string("gpu-tuning-level"))
{
if (GpuAccFound)
@@ -266,7 +275,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
"WARNING: CachedNetworkFilePath is enabled, but no backends that accept this option are set.";
}
}
- // Process GPU & CPU backend options
+ // Process GPU & CPU backend options
else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
{
if (GpuAccFound)
@@ -287,7 +296,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
"WARNING: Fastmath is enabled, but no backends that accept this option are set.";
}
}
- // Process CPU backend options
+ // Process CPU backend options
else if (std::string(options_keys[i]) == std::string("number-of-threads"))
{
if (CpuAccFound)
@@ -303,17 +312,17 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
"WARNING: NumberOfThreads is enabled, but no backends that accept this option are set.";
}
}
- // Process reduce-fp32-to-fp16 option
+ // Process reduce-fp32-to-fp16 option
else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
{
optimizerOptions.SetReduceFp32ToFp16(armnn::stringUtils::StringToBool(options_values[i]));
}
- // Process debug-data
+ // Process debug-data
else if (std::string(options_keys[i]) == std::string("debug-data"))
{
optimizerOptions.SetDebugEnabled(armnn::stringUtils::StringToBool(options_values[i]));
}
- // Infer output-shape
+ // Infer output-shape
else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
{
if (armnn::stringUtils::StringToBool(options_values[i]))
@@ -325,23 +334,23 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);
}
}
- // Allow expanded dims
+ // Allow expanded dims
else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
{
optimizerOptions.SetAllowExpandedDims(armnn::stringUtils::StringToBool(options_values[i]));
}
- // Process memory-import
+ // Process memory-import
else if (std::string(options_keys[i]) == std::string("memory-import"))
{
optimizerOptions.SetImportEnabled(armnn::stringUtils::StringToBool(options_values[i]));
}
- // Process enable-internal-profiling
+ // Process enable-internal-profiling
else if (std::string(options_keys[i]) == std::string("enable-internal-profiling"))
{
internalProfilingState = *options_values[i] != '0';
optimizerOptions.SetProfilingEnabled(internalProfilingState);
}
- // Process internal-profiling-detail
+ // Process internal-profiling-detail
else if (std::string(options_keys[i]) == std::string("internal-profiling-detail"))
{
uint32_t detailLevel = static_cast<uint32_t>(std::stoul(options_values[i]));
@@ -358,7 +367,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
break;
}
}
- // Process enable-external-profiling
+ // Process enable-external-profiling
else if (std::string(options_keys[i]) == std::string("enable-external-profiling"))
{
runtimeOptions.m_ProfilingOptions.m_EnableProfiling = armnn::stringUtils::StringToBool(options_values[i]);
@@ -398,7 +407,6 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
{
SetSerializeToDot(options_values[i]);
}
-
// Process disable-tflite-runtime-fallback
else if (std::string(options_keys[i]) == std::string("disable-tflite-runtime-fallback"))
{
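
The DelegateOptions change above reduces to a two-pass scan over the key/value arrays: the first pass only resolves the "backends" key, so the second pass can rely on GpuAccFound/CpuAccFound when interpreting backend-dependent options such as gpu-tuning-level or number-of-threads, whatever order the keys arrive in. A stripped-down sketch of that pattern (a standalone illustration, not the actual constructor; the free() call is added here only to keep the sketch leak-free):

#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>

void ParseDelegateOptions(char const* const* keys, char const* const* values, size_t numOptions)
{
    std::vector<std::string> backends;

    // Pass 1: split the comma-separated "backends" value, wherever it appears in the list.
    for (size_t i = 0; i < numOptions; ++i)
    {
        if (std::string(keys[i]) == "backends")
        {
            char* dup = strdup(values[i]);
            for (char* pch = std::strtok(dup, ","); pch != nullptr; pch = std::strtok(nullptr, ","))
            {
                backends.push_back(pch);
            }
            free(dup);
            break;
        }
    }
    const bool gpuAccFound = std::count(backends.begin(), backends.end(), "GpuAcc") > 0;
    const bool cpuAccFound = std::count(backends.begin(), backends.end(), "CpuAcc") > 0;

    // Pass 2: every other option can now check the backend flags safely.
    for (size_t i = 0; i < numOptions; ++i)
    {
        if (std::string(keys[i]) == "backends")
        {
            continue;   // already handled in pass 1
        }
        // e.g. "gpu-tuning-level" is only honoured when gpuAccFound is true,
        //      "number-of-threads" only when cpuAccFound is true, and so on.
    }
    (void)gpuAccFound;
    (void)cpuAccFound;
}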
diff --git a/delegate/opaque/include/armnn_delegate.hpp b/delegate/opaque/include/armnn_delegate.hpp
index b07d96f639..ae85556884 100644
--- a/delegate/opaque/include/armnn_delegate.hpp
+++ b/delegate/opaque/include/armnn_delegate.hpp
@@ -36,7 +36,7 @@ struct DelegateData
/// Forward declaration for functions initializing the ArmNN Delegate
::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault();
-TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings);
+TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options);
void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate);
@@ -96,16 +96,15 @@ using TfLiteOpaqueDelegatePtr = tflite::delegates::TfLiteDelegatePtr;
class ArmnnDelegatePlugin : public DelegatePluginInterface
{
public:
- static std::unique_ptr<ArmnnDelegatePlugin> New(const tflite::TFLiteSettings& tflite_settings)
+ static std::unique_ptr<ArmnnDelegatePlugin> New(const tflite::TFLiteSettings& tfliteSettings)
{
- return std::make_unique<ArmnnDelegatePlugin>(tflite_settings);
+ return std::make_unique<ArmnnDelegatePlugin>(tfliteSettings);
}
tflite::delegates::TfLiteDelegatePtr Create() override
{
- // Use default settings until options have been enabled.
- return tflite::delegates::TfLiteDelegatePtr(
- TfLiteArmnnOpaqueDelegateCreate(nullptr), TfLiteArmnnOpaqueDelegateDelete);
+ return tflite::delegates::TfLiteDelegatePtr(TfLiteArmnnOpaqueDelegateCreate(m_delegateOptions),
+ TfLiteArmnnOpaqueDelegateDelete);
}
int GetDelegateErrno(TfLiteOpaqueDelegate* from_delegate) override
@@ -114,9 +113,11 @@ public:
}
explicit ArmnnDelegatePlugin(const tflite::TFLiteSettings& tfliteSettings)
- {
- // Use default settings until options have been enabled.
- }
+ : m_delegateOptions(ParseArmNNSettings(&tfliteSettings))
+ {}
+
+private:
+ armnnDelegate::DelegateOptions m_delegateOptions;
};
/// ArmnnSubgraph class where parsing the nodes to ArmNN format and creating the ArmNN Graph
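
With m_delegateOptions stored on the plugin, the stable-delegate path no longer falls back to defaults: the TFLiteSettings handed to the plugin registry are parsed once in the constructor and forwarded by Create(). A short usage sketch, assuming a tflite::TFLiteSettings* named settings built as in the flatbuffer example above; the "armnn_delegate" registry name matches the existing plugin registration exercised by the test:

std::unique_ptr<tflite::delegates::DelegatePluginInterface> plugin =
    tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *settings);

// Create() now builds the delegate from the parsed DelegateOptions rather than from nullptr.
tflite::delegates::TfLiteDelegatePtr delegate = plugin->Create();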
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 8e3597d1d3..129bc4333b 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -258,12 +258,8 @@ TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate*
return status;
}
-TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings)
+TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options)
{
- // This method will always create Opaque Delegate with default settings until
- // we have a DelegateOptions Constructor which can parse the void* settings
- armnn::IgnoreUnused(settings);
- auto options = TfLiteArmnnDelegateOptionsDefault();
auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
}
diff --git a/delegate/opaque/src/armnn_external_delegate.cpp b/delegate/opaque/src/armnn_external_delegate.cpp
index 6cc29f3fe1..aa1f3355ab 100644
--- a/delegate/opaque/src/armnn_external_delegate.cpp
+++ b/delegate/opaque/src/armnn_external_delegate.cpp
@@ -9,7 +9,10 @@ namespace {
TfLiteOpaqueDelegate* ArmNNDelegateCreateFunc(const void* tflite_settings)
{
- auto delegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(tflite_settings);
+ armnnDelegate::DelegateOptions opt = armnnOpaqueDelegate::ParseArmNNSettings(
+ static_cast<const tflite::TFLiteSettings*>(tflite_settings));
+
+ auto delegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(opt);
return delegate;
}
diff --git a/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp b/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
index 091dcefe8b..1562c9f837 100644
--- a/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
+++ b/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
@@ -8,12 +8,95 @@
#include <opaque/include/armnn_delegate.hpp>
+#include <tensorflow/lite/kernels/builtin_op_kernels.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include "tensorflow/lite/core/c/builtin_op_data.h"
+
namespace armnnOpaqueDelegate
{
TEST_SUITE("ArmnnOpaqueDelegate")
{
+TEST_CASE ("ArmnnOpaqueDelegate_Registered")
+{
+ using namespace tflite;
+ auto tfLiteInterpreter = std::make_unique<Interpreter>();
+
+ tfLiteInterpreter->AddTensors(3);
+ tfLiteInterpreter->SetInputs({0, 1});
+ tfLiteInterpreter->SetOutputs({2});
+
+ tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
+ tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
+ tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
+
+ TfLiteAddParams* addParams = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
+ addParams->activation = kTfLiteActNone;
+ addParams->pot_scale_int16 = false;
+
+ tflite::ops::builtin::BuiltinOpResolver opResolver;
+ const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
+ tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, addParams, opRegister);
+
+ // Create the Armnn Delegate
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ std::vector<armnn::BackendOptions> backendOptions;
+ backendOptions.emplace_back(
+ armnn::BackendOptions{ "BackendName",
+ {
+ { "Option1", 42 },
+ { "Option2", true }
+ }}
+ );
+
+ armnnDelegate::DelegateOptions delegateOptions(backends, backendOptions);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete)>
+ theArmnnDelegate(armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions),
+ armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete);
+
+ auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+ CHECK(status == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+}
+
+TEST_CASE ("ArmnnOpaqueDelegate_OptimizerOptionsRegistered")
+{
+ using namespace tflite;
+ auto tfLiteInterpreter = std::make_unique<Interpreter>();
+
+ tfLiteInterpreter->AddTensors(3);
+ tfLiteInterpreter->SetInputs({0, 1});
+ tfLiteInterpreter->SetOutputs({2});
+
+ tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
+ tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
+ tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
+
+ TfLiteAddParams* addParams = reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
+ addParams->activation = kTfLiteActNone;
+ addParams->pot_scale_int16 = false;
+
+ tflite::ops::builtin::BuiltinOpResolver opResolver;
+ const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
+ tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, addParams, opRegister);
+
+ // Create the Armnn Delegate
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+
+ armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, true);
+
+ armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete)>
+ theArmnnDelegate(armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions),
+ armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete);
+
+ auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+ CHECK(status == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+}
+
TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")
{
// Check default options can be created
@@ -28,7 +111,7 @@ TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")
CHECK(builder);
// Check Opaque delegate created
- auto opaqueDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(&options);
+ auto opaqueDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(options);
CHECK(opaqueDelegate);
// Check Opaque Delegate can be deleted
@@ -38,16 +121,27 @@ TEST_CASE ("DelegateOptions_OpaqueDelegateDefault")
TEST_CASE ("DelegatePluginTest")
{
- // Use default settings until options have been enabled.
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
- tflite::TFLiteSettingsBuilder tfliteSettingsBuilder(flatBufferBuilder);
- flatbuffers::Offset<tflite::TFLiteSettings> tfliteSettings = tfliteSettingsBuilder.Finish();
- flatBufferBuilder.Finish(tfliteSettings);
- const tflite::TFLiteSettings* settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
- flatBufferBuilder.GetBufferPointer());
+ const char* backends = "CpuRef";
+ bool fastmath = false;
+ const char* additional_parameters = "allow-expanded-dims=true";
+
+ flatbuffers::FlatBufferBuilder flatbuffer_builder;
+ flatbuffers::Offset<tflite::ArmNNSettings>
+ armnn_settings_offset = tflite::CreateArmNNSettingsDirect(flatbuffer_builder,
+ backends,
+ fastmath,
+ additional_parameters);
+
+ tflite::TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
+ tflite_settings_builder.add_armnn_settings(armnn_settings_offset);
+ flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings_offset = tflite_settings_builder.Finish();
+ flatbuffer_builder.Finish(tflite_settings_offset);
+
+ const tflite::TFLiteSettings* tflite_settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
+ flatbuffer_builder.GetBufferPointer());
std::unique_ptr<tflite::delegates::DelegatePluginInterface> delegatePlugin =
- tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *settings);
+ tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", *tflite_settings);
// Plugin is created correctly using armnn_delegate name.
CHECK((delegatePlugin != nullptr));
diff --git a/delegate/opaque/src/test/DelegateTestInterpreter.cpp b/delegate/opaque/src/test/DelegateTestInterpreter.cpp
index 04e6ad6208..c46d3e166a 100644
--- a/delegate/opaque/src/test/DelegateTestInterpreter.cpp
+++ b/delegate/opaque/src/test/DelegateTestInterpreter.cpp
@@ -7,7 +7,6 @@
#include <armnn_delegate.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
namespace delegateTestInterpreter
{
@@ -17,9 +16,6 @@ DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
const std::string& customOp,
bool disableFallback)
{
- armnn::IgnoreUnused(backends);
- armnn::IgnoreUnused(disableFallback);
-
TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
@@ -28,8 +24,11 @@ DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
}
- // Use default settings until options have been enabled.
- auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(nullptr);
+ // Disable fallback by default for unit tests unless specified.
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ delegateOptions.DisableTfLiteRuntimeFallback(disableFallback);
+
+ auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions);
TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
m_TfLiteDelegate = armnnDelegate;
@@ -44,8 +43,6 @@ DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
const armnnDelegate::DelegateOptions& delegateOptions,
const std::string& customOp)
{
- armnn::IgnoreUnused(delegateOptions);
-
TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
@@ -54,8 +51,7 @@ DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
}
- // Use default settings until options have been enabled.
- auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(nullptr);
+ auto armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(delegateOptions);
TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
m_TfLiteDelegate = armnnDelegate;