aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTeresa Charlin <teresa.charlinreyes@arm.com>2023-10-04 11:17:03 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2023-10-05 18:18:29 +0000
commit19ad816539037a095749cdd41852292a3d87dd2b (patch)
tree409697e5f10ac6a4e1b86cd86bfba5e0663f16ba
parent727d017aa3559cb33c97a8d77b5a32fbb98b9e35 (diff)
downloadarmnn-19ad816539037a095749cdd41852292a3d87dd2b.tar.gz
IVGCVSW-8060 Add ArmNNSettings parser function for Opaque Delegate
* Add Unit Tests * Update DelegateOptions constructor Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: I0e88403ac280e4cf8710ae7ee38b3b56dba42adc
-rw-r--r--delegate/common/include/DelegateOptions.hpp22
-rw-r--r--delegate/common/src/DelegateOptions.cpp168
-rw-r--r--delegate/opaque/include/armnn_delegate.hpp2
-rw-r--r--delegate/opaque/src/armnn_delegate.cpp64
-rw-r--r--delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp255
5 files changed, 449 insertions, 62 deletions
diff --git a/delegate/common/include/DelegateOptions.hpp b/delegate/common/include/DelegateOptions.hpp
index abf446a402..d4924ccf65 100644
--- a/delegate/common/include/DelegateOptions.hpp
+++ b/delegate/common/include/DelegateOptions.hpp
@@ -120,6 +120,17 @@ public:
* Possible values: ["true"/"false"] \n
* Description: Add debug data for easier troubleshooting
*
+ * Option key: "infer-output-shape" \n
+ * Possible values: ["true"/"false"] \n
+     * Description: Infers output tensor shape from input tensor shape and validates where applicable.
+ *
+ * Option key: "allow-expanded-dims" \n
+ * Possible values: ["true"/"false"] \n
+ * Description: If true will disregard dimensions with a size of 1 when validating tensor shapes but tensor
+ * sizes must still match. \n
+ * This is an Experimental parameter that is incompatible with "infer-output-shape". \n
+ * This parameter may be removed in a later update.
+ *
* Option key: "memory-import" \n
* Possible values: ["true"/"false"] \n
* Description: Enable memory import
@@ -164,17 +175,6 @@ public:
* Possible values: [filenameString] \n
* Description: Serialize the optimized network to the file specified in "dot" format.
*
- * Option key: "infer-output-shape" \n
- * Possible values: ["true"/"false"] \n
- * Description: Infers output tensor shape from input tensor shape and validate where applicable.
- *
- * Option key: "allow-expanded-dims" \n
- * Possible values: ["true"/"false"] \n
- * Description: If true will disregard dimensions with a size of 1 when validating tensor shapes but tensor
- * sizes must still match. \n
- * This is an Experimental parameter that is incompatible with "infer-output-shape". \n
- * This parameter may be removed in a later update.
- *
* Option key: "disable-tflite-runtime-fallback" \n
* Possible values: ["true"/"false"] \n
* Description: Disable TfLite Runtime fallback in the Arm NN TfLite delegate.
diff --git a/delegate/common/src/DelegateOptions.cpp b/delegate/common/src/DelegateOptions.cpp
index dca9af38c4..4596159fce 100644
--- a/delegate/common/src/DelegateOptions.cpp
+++ b/delegate/common/src/DelegateOptions.cpp
@@ -16,9 +16,9 @@ struct DelegateOptionsImpl
DelegateOptionsImpl() = default;
explicit DelegateOptionsImpl(armnn::Compute computeDevice,
- const std::vector<armnn::BackendOptions>& backendOptions,
- const armnn::Optional<armnn::LogSeverity> logSeverityLevel)
- : m_Backends({computeDevice}), m_RuntimeOptions(), m_LoggingSeverity(logSeverityLevel)
+ const std::vector<armnn::BackendOptions>& backendOptions,
+ const armnn::Optional<armnn::LogSeverity> logSeverityLevel)
+ : m_Backends({computeDevice}), m_RuntimeOptions(), m_LoggingSeverity(logSeverityLevel)
{
m_RuntimeOptions.m_BackendOptions = backendOptions;
}
@@ -145,6 +145,10 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
armnn::OptimizerOptionsOpaque optimizerOptions;
bool internalProfilingState = false;
armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
+
+ bool GpuAccFound = false;
+ bool CpuAccFound = false;
+
for (size_t i = 0; i < num_options; ++i)
{
// Process backends
@@ -160,6 +164,8 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
pch = strtok (NULL, ",");
}
SetBackends(backends);
+ GpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "GpuAcc");
+ CpuAccFound = std::count(GetBackends().begin(), GetBackends().end(), "CpuAcc");
}
// Process dynamic-backends-path
else if (std::string(options_keys[i]) == std::string("dynamic-backends-path"))
@@ -174,21 +180,45 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
// Process GPU backend options
else if (std::string(options_keys[i]) == std::string("gpu-tuning-level"))
{
- armnn::BackendOptions option("GpuAcc", {{"TuningLevel",
- atoi(options_values[i])}});
- runtimeOptions.m_BackendOptions.push_back(option);
+ if (GpuAccFound)
+ {
+ armnn::BackendOptions option("GpuAcc", {{"TuningLevel",
+ atoi(options_values[i])}});
+ runtimeOptions.m_BackendOptions.push_back(option);
+ }
+ else
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: TuningLevel is enabled, but no backends that accept this option are set.";
+ }
}
else if (std::string(options_keys[i]) == std::string("gpu-mlgo-tuning-file"))
{
- armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath",
- std::string(options_values[i])}});
- optimizerOptions.AddModelOption(option);
+ if (GpuAccFound)
+ {
+ armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath",
+ std::string(options_values[i])}});
+ optimizerOptions.AddModelOption(option);
+ }
+ else
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: MLGOTuningFilePath is enabled, but no backends that accept this option are set.";
+ }
}
else if (std::string(options_keys[i]) == std::string("gpu-tuning-file"))
{
- armnn::BackendOptions option("GpuAcc", {{"TuningFile",
- std::string(options_values[i])}});
- runtimeOptions.m_BackendOptions.push_back(option);
+ if (GpuAccFound)
+ {
+ armnn::BackendOptions option("GpuAcc", {{"TuningFile",
+ std::string(options_values[i])}});
+ runtimeOptions.m_BackendOptions.push_back(option);
+ }
+ else
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: TuningFile is enabled, but no backends that accept this option are set.";
+ }
}
else if (std::string(options_keys[i]) == std::string("gpu-enable-profiling"))
{
@@ -196,40 +226,82 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
}
else if (std::string(options_keys[i]) == std::string("gpu-kernel-profiling-enabled"))
{
- armnn::BackendOptions option("GpuAcc", {{"KernelProfilingEnabled",
- armnn::stringUtils::StringToBool(options_values[i])}});
- runtimeOptions.m_BackendOptions.push_back(option);
+ if (GpuAccFound)
+ {
+ armnn::BackendOptions option("GpuAcc", {{"KernelProfilingEnabled",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ runtimeOptions.m_BackendOptions.push_back(option);
+ }
+ else
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: KernelProfilingEnabled is enabled, but no backends that accept this option are set.";
+ }
}
else if (std::string(options_keys[i]) == std::string("save-cached-network"))
{
- armnn::BackendOptions option("GpuAcc", {{"SaveCachedNetwork",
- armnn::stringUtils::StringToBool(options_values[i])}});
- optimizerOptions.AddModelOption(option);
+ if (GpuAccFound)
+ {
+ armnn::BackendOptions option("GpuAcc", {{"SaveCachedNetwork",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ optimizerOptions.AddModelOption(option);
+ }
+ else
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: SaveCachedNetwork is enabled, but no backends that accept this option are set.";
+ }
}
else if (std::string(options_keys[i]) == std::string("cached-network-filepath"))
{
- armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath",
- std::string(options_values[i])}});
- optimizerOptions.AddModelOption(option);
+ if (GpuAccFound)
+ {
+ armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath",
+ std::string(options_values[i])}});
+ optimizerOptions.AddModelOption(option);
+ }
+ else
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: CachedNetworkFilePath is enabled, but no backends that accept this option are set.";
+ }
}
// Process GPU & CPU backend options
else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
{
- armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled",
- armnn::stringUtils::StringToBool(options_values[i])}});
- optimizerOptions.AddModelOption(modelOptionGpu);
-
- armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled",
- armnn::stringUtils::StringToBool(options_values[i])}});
- optimizerOptions.AddModelOption(modelOptionCpu);
+ if (GpuAccFound)
+ {
+ armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ optimizerOptions.AddModelOption(modelOptionGpu);
+ }
+ if (CpuAccFound)
+ {
+ armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled",
+ armnn::stringUtils::StringToBool(options_values[i])}});
+ optimizerOptions.AddModelOption(modelOptionCpu);
+ }
+        if (!GpuAccFound && !CpuAccFound)
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: Fastmath is enabled, but no backends that accept this option are set.";
+ }
}
// Process CPU backend options
else if (std::string(options_keys[i]) == std::string("number-of-threads"))
{
- unsigned int numberOfThreads = armnn::numeric_cast<unsigned int>(atoi(options_values[i]));
- armnn::BackendOptions modelOption("CpuAcc",
- {{"NumberOfThreads", numberOfThreads}});
- optimizerOptions.AddModelOption(modelOption);
+ if (CpuAccFound)
+ {
+ unsigned int numberOfThreads = armnn::numeric_cast<unsigned int>(atoi(options_values[i]));
+ armnn::BackendOptions modelOption("CpuAcc",
+ {{"NumberOfThreads", numberOfThreads}});
+ optimizerOptions.AddModelOption(modelOption);
+ }
+ else
+ {
+ ARMNN_LOG(warning) <<
+ "WARNING: NumberOfThreads is enabled, but no backends that accept this option are set.";
+ }
}
// Process reduce-fp32-to-fp16 option
else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
@@ -244,20 +316,19 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
// Infer output-shape
else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
{
- armnn::BackendOptions backendOption("ShapeInferenceMethod",
+ if (armnn::stringUtils::StringToBool(options_values[i]))
+ {
+ optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::InferAndValidate);
+ }
+ else
{
- { "InferAndValidate", armnn::stringUtils::StringToBool(options_values[i]) }
- });
- optimizerOptions.AddModelOption(backendOption);
+ optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);
+ }
}
// Allow expanded dims
else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
{
- armnn::BackendOptions backendOption("AllowExpandedDims",
- {
- { "AllowExpandedDims", armnn::stringUtils::StringToBool(options_values[i]) }
- });
- optimizerOptions.AddModelOption(backendOption);
+ optimizerOptions.SetAllowExpandedDims(armnn::stringUtils::StringToBool(options_values[i]));
}
// Process memory-import
else if (std::string(options_keys[i]) == std::string("memory-import"))
@@ -290,14 +361,12 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
// Process enable-external-profiling
else if (std::string(options_keys[i]) == std::string("enable-external-profiling"))
{
- runtimeOptions.m_ProfilingOptions.m_EnableProfiling =
- armnn::stringUtils::StringToBool(options_values[i]);
+ runtimeOptions.m_ProfilingOptions.m_EnableProfiling = armnn::stringUtils::StringToBool(options_values[i]);
}
// Process timeline-profiling
else if (std::string(options_keys[i]) == std::string("timeline-profiling"))
{
- runtimeOptions.m_ProfilingOptions.m_TimelineEnabled =
- armnn::stringUtils::StringToBool(options_values[i]);
+ runtimeOptions.m_ProfilingOptions.m_TimelineEnabled = armnn::stringUtils::StringToBool(options_values[i]);
}
// Process outgoing-capture-file
else if (std::string(options_keys[i]) == std::string("outgoing-capture-file"))
@@ -312,14 +381,12 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
// Process file-only-external-profiling
else if (std::string(options_keys[i]) == std::string("file-only-external-profiling"))
{
- runtimeOptions.m_ProfilingOptions.m_FileOnly =
- armnn::stringUtils::StringToBool(options_values[i]);
+ runtimeOptions.m_ProfilingOptions.m_FileOnly = armnn::stringUtils::StringToBool(options_values[i]);
}
// Process counter-capture-period
else if (std::string(options_keys[i]) == std::string("counter-capture-period"))
{
- runtimeOptions.m_ProfilingOptions.m_CapturePeriod =
- static_cast<uint32_t>(std::stoul(options_values[i]));
+ runtimeOptions.m_ProfilingOptions.m_CapturePeriod = static_cast<uint32_t>(std::stoul(options_values[i]));
}
// Process profiling-file-format
else if (std::string(options_keys[i]) == std::string("profiling-file-format"))
@@ -339,8 +406,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
}
else
{
- throw armnn::Exception("Unknown option for the ArmNN Delegate given: " +
- std::string(options_keys[i]));
+ throw armnn::Exception("Unknown option for the ArmNN Delegate given: " + std::string(options_keys[i]));
}
}
diff --git a/delegate/opaque/include/armnn_delegate.hpp b/delegate/opaque/include/armnn_delegate.hpp
index 474d5978c9..b07d96f639 100644
--- a/delegate/opaque/include/armnn_delegate.hpp
+++ b/delegate/opaque/include/armnn_delegate.hpp
@@ -42,6 +42,8 @@ void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate);
TfLiteStatus DoPrepare(TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data);
+armnnDelegate::DelegateOptions ParseArmNNSettings(const tflite::TFLiteSettings* tflite_settings);
+
/// ArmNN Opaque Delegate
class ArmnnOpaqueDelegate
{
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index 83e90a0026..8e3597d1d3 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -55,6 +55,7 @@
#include <algorithm>
#include <iostream>
#include <sstream>
+#include <regex>
namespace armnnOpaqueDelegate
{
@@ -63,6 +64,69 @@ static auto* g_delegate_plugin_ArmnnDelegatePlugin_ =
new tflite::delegates::DelegatePluginRegistry::Register("armnn_delegate",
ArmnnDelegatePlugin::New);
+armnnDelegate::DelegateOptions ParseArmNNSettings(const tflite::TFLiteSettings* tfLiteSettings)
+{
+ const tflite::ArmNNSettings* settings = tfLiteSettings->armnn_settings();
+ ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(settings,
+ "The passed TFLiteSettings did not contain a valid ArmNNSettings");
+
+ // Extract settings fields
+ bool fastmath = settings->fastmath();
+ std::string backends_str = (settings->backends()) ? settings->backends()->str() : "";
+ const ::flatbuffers::String* additional_parameters = settings->additional_parameters();
+
+ // Build additional parameters string
+ std::string additional_parameters_str;
+ if (additional_parameters)
+ {
+ additional_parameters_str = additional_parameters->str();
+
+ // Apply a regex to remove spaces around the = and , signs
+ std::regex regex_equals_str("[ ]*=[ ]*");
+ std::regex regex_comma_str("[ ]*,[ ]*");
+ additional_parameters_str = std::regex_replace(additional_parameters_str, regex_equals_str, "=");
+ additional_parameters_str = std::regex_replace(additional_parameters_str, regex_comma_str, ",");
+ }
+
+ // Build a std::pair list of option names and values
+ std::vector<std::pair<std::string, std::string>> options;
+ options.emplace_back(std::pair<std::string, std::string>("backends", backends_str));
+ options.emplace_back(std::pair<std::string, std::string>("enable-fast-math", (fastmath) ? "true" : "false"));
+
+ std::stringstream additional_parameters_ss(additional_parameters_str);
+ while (additional_parameters_ss.good())
+ {
+ std::string option_str;
+ getline( additional_parameters_ss, option_str, ',' );
+ size_t n = option_str.find("=");
+ if (n != std::string::npos)
+ {
+ std::string name = option_str.substr(0, n);
+ std::string value = option_str.substr(n + 1, std::string::npos);
+ options.emplace_back(std::pair<std::string, std::string>(name, value));
+ }
+ }
+
+ // Build the key and value lists to pass into the constructor of the DelegateOptions
+ size_t num_options = options.size();
+    std::unique_ptr<const char*[]> options_keys   = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
+    std::unique_ptr<const char*[]> options_values = std::unique_ptr<const char*[]>(new const char*[num_options + 1]);
+
+ for (size_t i=0; i<num_options; ++i)
+ {
+ options_keys.get()[i] = options[i].first.c_str();
+ options_values.get()[i] = options[i].second.c_str();
+ }
+
+ // Finally call the constructor
+ armnnDelegate::DelegateOptions delegateOptions = armnnDelegate::DelegateOptions(options_keys.get(),
+ options_values.get(),
+ num_options,
+ nullptr);
+
+ return delegateOptions;
+}
+
ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
: m_Options(std::move(options))
{
diff --git a/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp b/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
index 9d255b1126..091dcefe8b 100644
--- a/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
+++ b/delegate/opaque/src/test/ArmnnOpaqueDelegateTest.cpp
@@ -59,5 +59,260 @@ TEST_CASE ("DelegatePluginTest")
CHECK((armnnDelegate->opaque_delegate_builder != nullptr));
}
+armnnDelegate::DelegateOptions BuildDelegateOptions(const char* backends,
+ bool fastmath,
+ const char* additional_parameters)
+{
+ flatbuffers::FlatBufferBuilder flatbuffer_builder;
+
+ flatbuffers::Offset<tflite::ArmNNSettings>
+ armnn_settings_offset = tflite::CreateArmNNSettingsDirect(flatbuffer_builder,
+ backends,
+ fastmath,
+ additional_parameters);
+
+ tflite::TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
+ tflite_settings_builder.add_armnn_settings(armnn_settings_offset);
+ flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings_offset = tflite_settings_builder.Finish();
+ flatbuffer_builder.Finish(tflite_settings_offset);
+
+ const tflite::TFLiteSettings* tflite_settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
+ flatbuffer_builder.GetBufferPointer());
+
+ armnnDelegate::DelegateOptions delegateOptions = ParseArmNNSettings(tflite_settings);
+
+ return delegateOptions;
+}
+
+unsigned int CountBackendOptions(armnn::BackendId backendId,
+ armnnDelegate::DelegateOptions& delegateOptions,
+ bool runtime = false)
+{
+ unsigned int count = 0;
+
+ std::vector<armnn::BackendOptions> modelOptions = runtime ? delegateOptions.GetRuntimeOptions().m_BackendOptions
+ : delegateOptions.GetOptimizerOptions().GetModelOptions();
+ for (const auto& backendOptions : modelOptions)
+ {
+ if (backendOptions.GetBackendId() == backendId)
+ {
+ count = backendOptions.GetOptionCount();
+ }
+ }
+
+ return count;
+}
+
+bool GetBackendOption(armnn::BackendId backendId,
+ armnnDelegate::DelegateOptions& delegateOptions,
+ std::string& optionName,
+ armnn::BackendOptions::BackendOption& backendOption,
+ bool runtime = false)
+{
+ bool result = false;
+
+ std::vector<armnn::BackendOptions> modelOptions = runtime ? delegateOptions.GetRuntimeOptions().m_BackendOptions
+ : delegateOptions.GetOptimizerOptions().GetModelOptions();
+
+ for (const auto& backendOptions : modelOptions)
+ {
+ if (backendOptions.GetBackendId() == backendId)
+ {
+ for (size_t i = 0; i < backendOptions.GetOptionCount(); ++i)
+ {
+ const armnn::BackendOptions::BackendOption& option = backendOptions.GetOption(i);
+ if (option.GetName() == optionName)
+ {
+ backendOption = option;
+ result = true;
+ break;
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
+TEST_CASE ("ParseArmNNSettings_backend")
+{
+ {
+ armnnDelegate::DelegateOptions delegateOptions = BuildDelegateOptions("CpuRef,GpuAcc", false, nullptr);
+
+ std::vector<armnn::BackendId> expectedBackends = {"CpuRef", "GpuAcc"};
+ CHECK_EQ(expectedBackends, delegateOptions.GetBackends());
+ }
+ {
+ armnnDelegate::DelegateOptions delegateOptions = BuildDelegateOptions("GpuAcc", false, nullptr);
+
+ std::vector<armnn::BackendId> expectedBackends = {"GpuAcc"};
+ CHECK_EQ(expectedBackends, delegateOptions.GetBackends());
+ }
+}
+
+TEST_CASE ("ParseArmNNSettings_fastmath")
+{
+ // Test fastmath true in both backends
+ {
+ armnnDelegate::DelegateOptions delegateOptions = BuildDelegateOptions("CpuAcc,GpuAcc", true, nullptr);
+
+ std::vector<armnn::BackendId> expectedBackends = {"CpuAcc", "GpuAcc"};
+ CHECK_EQ(expectedBackends, delegateOptions.GetBackends());
+ CHECK_EQ(CountBackendOptions(armnn::Compute::CpuAcc, delegateOptions), 1);
+    CHECK_EQ(CountBackendOptions(armnn::Compute::GpuAcc, delegateOptions), 1);
+
+ armnn::BackendOptions::BackendOption backendOption("", false);
+ std::string optionName = "FastMathEnabled";
+ CHECK_EQ(GetBackendOption(armnn::Compute::CpuAcc, delegateOptions, optionName, backendOption), true);
+ CHECK_EQ(backendOption.GetValue().AsBool(), true);
+ CHECK_EQ(backendOption.GetName(), optionName);
+ CHECK_EQ(GetBackendOption(armnn::Compute::GpuAcc, delegateOptions, optionName, backendOption), true);
+ CHECK_EQ(backendOption.GetValue().AsBool(), true);
+ CHECK_EQ(backendOption.GetName(), optionName);
+ }
+
+ // Test fastmath true in one backend
+ {
+ armnnDelegate::DelegateOptions delegateOptions = BuildDelegateOptions("CpuAcc,CpuRef", true, nullptr);
+
+ std::vector<armnn::BackendId> expectedBackends = {"CpuAcc", "CpuRef"};
+ CHECK_EQ(expectedBackends, delegateOptions.GetBackends());
+ CHECK_EQ(CountBackendOptions(armnn::Compute::CpuAcc, delegateOptions), 1);
+ CHECK_EQ(CountBackendOptions(armnn::Compute::CpuRef, delegateOptions), 0);
+
+ armnn::BackendOptions::BackendOption backendOption("", false);
+ std::string optionName = "FastMathEnabled";
+ CHECK_EQ(GetBackendOption(armnn::Compute::CpuAcc, delegateOptions, optionName, backendOption), true);
+ CHECK_EQ(backendOption.GetValue().AsBool(), true);
+ CHECK_EQ(backendOption.GetName(), optionName);
+ CHECK_EQ(GetBackendOption(armnn::Compute::CpuRef, delegateOptions, optionName, backendOption), false);
+ }
+
+ // Test fastmath false
+ {
+ armnnDelegate::DelegateOptions delegateOptions = BuildDelegateOptions("GpuAcc", false, nullptr);
+
+ std::vector<armnn::BackendId> expectedBackends = {"GpuAcc"};
+ CHECK_EQ(expectedBackends, delegateOptions.GetBackends());
+ CHECK_EQ(CountBackendOptions(armnn::Compute::GpuAcc, delegateOptions), 1);
+
+ armnn::BackendOptions::BackendOption backendOption("", false);
+ std::string optionName = "FastMathEnabled";
+ CHECK_EQ(GetBackendOption(armnn::Compute::GpuAcc, delegateOptions, optionName, backendOption), true);
+ CHECK_EQ(backendOption.GetValue().AsBool(), false);
+ CHECK_EQ(backendOption.GetName(), optionName);
+ }
+}
+
+TEST_CASE ("ParseArmNNSettings_additional_options_raw")
+{
+ const char* backends = "GpuAcc";
+ bool fastmath = false;
+ const char* additional_parameters = "allow-expanded-dims=true";
+
+ flatbuffers::FlatBufferBuilder flatbuffer_builder;
+ flatbuffers::Offset<tflite::ArmNNSettings>
+ armnn_settings_offset = tflite::CreateArmNNSettingsDirect(flatbuffer_builder,
+ backends,
+ fastmath,
+ additional_parameters);
+
+ tflite::TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
+ tflite_settings_builder.add_armnn_settings(armnn_settings_offset);
+ flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings_offset = tflite_settings_builder.Finish();
+ flatbuffer_builder.Finish(tflite_settings_offset);
+
+ const tflite::TFLiteSettings* tflite_settings = flatbuffers::GetRoot<tflite::TFLiteSettings>(
+ flatbuffer_builder.GetBufferPointer());
+ CHECK((tflite_settings->armnn_settings()->additional_parameters() != nullptr));
+ CHECK_EQ(tflite_settings->armnn_settings()->additional_parameters()->str(), additional_parameters);
+
+ armnnDelegate::DelegateOptions delegateOptions = ParseArmNNSettings(tflite_settings);
+ CHECK_EQ(delegateOptions.GetOptimizerOptions().GetAllowExpandedDims(), true);
+}
+
+TEST_CASE ("ParseArmNNSettings_additional_options")
+{
+ std::string options = "number-of-threads=29," // optimizer-backend option only Cpu
+ "gpu-kernel-profiling-enabled=true," // runtime-backend option only GPU
+ "allow-expanded-dims=true," // optimizer option
+ "logging-severity=debug," // option
+ "counter-capture-period=100u"; // runtime-profiling option
+ armnnDelegate::DelegateOptions delegateOptions = BuildDelegateOptions("CpuAcc,GpuAcc", false, options.c_str());
+
+ // Variables to be used in all checks
+ armnn::BackendOptions::BackendOption backendOption("", false);
+ std::string optionName;
+
+ // number-of-threads
+ CHECK_EQ(CountBackendOptions(armnn::Compute::CpuAcc, delegateOptions), 1);
+ optionName = "NumberOfThreads";
+ CHECK_EQ(GetBackendOption(armnn::Compute::CpuAcc, delegateOptions, optionName, backendOption), true);
+ CHECK_EQ(backendOption.GetValue().AsUnsignedInt(), 29);
+ CHECK_EQ(backendOption.GetName(), optionName);
+
+ // gpu-kernel-profiling-enabled
+ CHECK_EQ(CountBackendOptions(armnn::Compute::GpuAcc, delegateOptions, true), 1);
+ optionName = "KernelProfilingEnabled";
+ CHECK_EQ(GetBackendOption(armnn::Compute::GpuAcc, delegateOptions, optionName, backendOption, true), true);
+ CHECK_EQ(backendOption.GetValue().AsBool(), true);
+ CHECK_EQ(backendOption.GetName(), optionName);
+
+ // allow-expanded-dims
+ CHECK_EQ(delegateOptions.GetOptimizerOptions().GetAllowExpandedDims(), true);
+
+ // logging-severity
+ CHECK_EQ(delegateOptions.GetLoggingSeverity(), armnn::LogSeverity::Debug);
+
+ // counter-capture-period
+ CHECK_EQ(delegateOptions.GetRuntimeOptions().m_ProfilingOptions.m_CapturePeriod, 100);
+}
+
+TEST_CASE ("ParseArmNNSettings_additional_options_regex")
+{
+ std::string options = "allow-expanded-dims= true, " // optimizer option
+ "number-of-threads =29 ," // optimizer-backend option only Cpu
+ "logging-severity = trace , " // option
+ "counter-capture-period = 100u"; // runtime-profiling option
+ armnnDelegate::DelegateOptions delegateOptions = BuildDelegateOptions("GpuAcc", false, options.c_str());
+
+ // Variables to be used in all checks
+ armnn::BackendOptions::BackendOption backendOption("", false);
+ std::string optionName;
+
+ std::vector<armnn::BackendId> expectedBackends = {"GpuAcc"};
+ CHECK_EQ(expectedBackends, delegateOptions.GetBackends());
+
+ // enable-fast-math
+ CHECK_EQ(CountBackendOptions(armnn::Compute::GpuAcc, delegateOptions), 1);
+ optionName = "FastMathEnabled";
+ CHECK_EQ(GetBackendOption(armnn::Compute::CpuRef, delegateOptions, optionName, backendOption), false);
+ CHECK_EQ(GetBackendOption(armnn::Compute::CpuAcc, delegateOptions, optionName, backendOption), false);
+ CHECK_EQ(GetBackendOption(armnn::Compute::GpuAcc, delegateOptions, optionName, backendOption), true);
+ CHECK_EQ(backendOption.GetValue().AsBool(), false);
+ CHECK_EQ(backendOption.GetName(), optionName);
+
+ // allow-expanded-dims
+ CHECK_EQ(delegateOptions.GetOptimizerOptions().GetAllowExpandedDims(), true);
+
+ // number-of-threads not saved anywhere, as it is a parameter only valid for CpuAcc
+ optionName="number-of-threads";
+ CHECK_EQ(GetBackendOption(armnn::Compute::CpuAcc, delegateOptions, optionName, backendOption), false);
+ CHECK_EQ(GetBackendOption(armnn::Compute::GpuAcc, delegateOptions, optionName, backendOption), false);
+
+ // logging-severity
+ CHECK_EQ(delegateOptions.GetLoggingSeverity(), armnn::LogSeverity::Trace);
+
+ // counter-capture-period
+ CHECK_EQ(delegateOptions.GetRuntimeOptions().m_ProfilingOptions.m_CapturePeriod, 100);
+}
+
+TEST_CASE ("ParseArmNNSettings_additional_options_incorrect")
+{
+ std::string options = "number-of-thread=29"; // The correct one would be "number-of-threads" in plural
+
+ CHECK_THROWS(BuildDelegateOptions("CpuAcc,GpuAcc", false, options.c_str()));
+}
+
}
} // namespace armnnDelegate