From c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 Mon Sep 17 00:00:00 2001
From: John Mcloughlin
Date: Fri, 24 Mar 2023 12:07:25 +0000
Subject: IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions

Signed-off-by: John Mcloughlin
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
---
 delegate/classic/src/armnn_delegate.cpp            |  6 +--
 .../classic/src/test/ArmnnClassicDelegateTest.cpp  |  2 +-
 delegate/common/include/DelegateOptions.hpp        |  8 ++--
 delegate/common/src/DelegateOptions.cpp            | 47 +++++++++++-----------
 delegate/opaque/src/armnn_delegate.cpp             |  6 +--
 delegate/test/DelegateOptionsTest.cpp              | 17 ++++----
 6 files changed, 45 insertions(+), 41 deletions(-)

diff --git a/delegate/classic/src/armnn_delegate.cpp b/delegate/classic/src/armnn_delegate.cpp
index 4ddfc1a35f..b494a36769 100644
--- a/delegate/classic/src/armnn_delegate.cpp
+++ b/delegate/classic/src/armnn_delegate.cpp
@@ -335,7 +335,7 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
     DelegateData delegateData(delegate->m_Options.GetBackends());
 
     // Build ArmNN Network
-    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
+    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
     armnn::NetworkId networkId;
     delegateData.m_Network = armnn::INetwork::Create(networkOptions);
 
@@ -424,11 +424,11 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
     armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
     armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
     // There's a bit of an assumption here that the delegate will only support Malloc memory source.
-    if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
+    if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
     {
         inputSource = armnn::MemorySource::Malloc;
     }
-    if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
+    if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
     {
         outputSource = armnn::MemorySource::Malloc;
     }
diff --git a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
index 26acfe91f1..409b769273 100644
--- a/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
+++ b/delegate/classic/src/test/ArmnnClassicDelegateTest.cpp
@@ -76,7 +76,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsRegistered")
 
     // Create the Armnn Delegate
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    armnn::OptimizerOptions optimizerOptions(true, true, false, true);
+    armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, true);
 
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
diff --git a/delegate/common/include/DelegateOptions.hpp b/delegate/common/include/DelegateOptions.hpp
index 3bf9b35191..abf446a402 100644
--- a/delegate/common/include/DelegateOptions.hpp
+++ b/delegate/common/include/DelegateOptions.hpp
@@ -32,12 +32,12 @@ public:
                     armnn::Optional<armnn::LogSeverity> logSeverityLevel = armnn::EmptyOptional());
 
     DelegateOptions(armnn::Compute computeDevice,
-                    const armnn::OptimizerOptions& optimizerOptions,
+                    const armnn::OptimizerOptionsOpaque& optimizerOptions,
                     const armnn::Optional<armnn::LogSeverity>& logSeverityLevel = armnn::EmptyOptional(),
                     const armnn::Optional<armnn::DebugCallbackFunction>& func = armnn::EmptyOptional());
 
     DelegateOptions(const std::vector<armnn::BackendId>& backends,
-                    const armnn::OptimizerOptions& optimizerOptions,
+                    const armnn::OptimizerOptionsOpaque& optimizerOptions,
                     const armnn::Optional<armnn::LogSeverity>& logSeverityLevel = armnn::EmptyOptional(),
                    const armnn::Optional<armnn::DebugCallbackFunction>& func = armnn::EmptyOptional());
@@ -218,9 +218,9 @@ public:
 
     bool IsLoggingEnabled();
 
-    const armnn::OptimizerOptions& GetOptimizerOptions() const;
+    const armnn::OptimizerOptionsOpaque& GetOptimizerOptions() const;
 
-    void SetOptimizerOptions(const armnn::OptimizerOptions& optimizerOptions);
+    void SetOptimizerOptions(const armnn::OptimizerOptionsOpaque& optimizerOptions);
 
     const armnn::Optional<armnn::DebugCallbackFunction>& GetDebugCallbackFunction() const;
diff --git a/delegate/common/src/DelegateOptions.cpp b/delegate/common/src/DelegateOptions.cpp
index c4f0ad71bc..f8892c4665 100644
--- a/delegate/common/src/DelegateOptions.cpp
+++ b/delegate/common/src/DelegateOptions.cpp
@@ -32,7 +32,7 @@ struct DelegateOptionsImpl
     }
 
     explicit DelegateOptionsImpl(armnn::Compute computeDevice,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
         : p_Backends({computeDevice}),
@@ -44,7 +44,7 @@ struct DelegateOptionsImpl
     }
 
     explicit DelegateOptionsImpl(const std::vector<armnn::BackendId>& backends,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
         : p_Backends(backends),
@@ -66,7 +66,7 @@ struct DelegateOptionsImpl
     armnn::IRuntime::CreationOptions p_RuntimeOptions;
 
     /// Options for the optimization step for the network
-    armnn::OptimizerOptions p_OptimizerOptions;
+    armnn::OptimizerOptionsOpaque p_OptimizerOptions;
 
     /// Internal profiling options. Written to INetworkProperties during model load.
     /// Indicates whether internal profiling is enabled or not.
@@ -118,7 +118,7 @@ DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
 }
 
 DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
     : p_DelegateOptionsImpl(std::make_unique<DelegateOptionsImpl>(computeDevice, optimizerOptions,
@@ -127,7 +127,7 @@ DelegateOptions::DelegateOptions(armnn::Compute computeDevice,
 }
 
 DelegateOptions::DelegateOptions(const std::vector<armnn::BackendId>& backends,
-                                 const armnn::OptimizerOptions& optimizerOptions,
+                                 const armnn::OptimizerOptionsOpaque& optimizerOptions,
                                  const armnn::Optional<armnn::LogSeverity>& logSeverityLevel,
                                  const armnn::Optional<armnn::DebugCallbackFunction>& func)
     : p_DelegateOptionsImpl(std::make_unique<DelegateOptionsImpl>(backends, optimizerOptions,
@@ -142,7 +142,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
     : p_DelegateOptionsImpl(std::make_unique<DelegateOptionsImpl>())
 {
     armnn::IRuntime::CreationOptions runtimeOptions;
-    armnn::OptimizerOptions optimizerOptions;
+    armnn::OptimizerOptionsOpaque optimizerOptions;
     bool internalProfilingState = false;
     armnn::ProfilingDetailsMethod internalProfilingDetail = armnn::ProfilingDetailsMethod::DetailsWithEvents;
     for (size_t i = 0; i < num_options; ++i)
@@ -182,7 +182,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
         {
             armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", std::string(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(option);
+            optimizerOptions.AddModelOption(option);
         }
         else if (std::string(options_keys[i]) == std::string("gpu-tuning-file"))
         {
@@ -204,24 +204,24 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
         {
             armnn::BackendOptions option("GpuAcc",
                                          {{"SaveCachedNetwork", armnn::stringUtils::StringToBool(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(option);
+            optimizerOptions.AddModelOption(option);
         }
         else if (std::string(options_keys[i]) == std::string("cached-network-filepath"))
         {
             armnn::BackendOptions option("GpuAcc",
                                          {{"CachedNetworkFilePath", std::string(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(option);
+            optimizerOptions.AddModelOption(option);
         }
         // Process GPU & CPU backend options
         else if (std::string(options_keys[i]) == std::string("enable-fast-math"))
         {
             armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled",
-                                                 armnn::stringUtils::StringToBool(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(modelOptionGpu);
+                                                            armnn::stringUtils::StringToBool(options_values[i])}});
+            optimizerOptions.AddModelOption(modelOptionGpu);
 
             armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled",
-                                                 armnn::stringUtils::StringToBool(options_values[i])}});
-            optimizerOptions.m_ModelOptions.push_back(modelOptionCpu);
+                                                            armnn::stringUtils::StringToBool(options_values[i])}});
+            optimizerOptions.AddModelOption(modelOptionCpu);
         }
         // Process CPU backend options
         else if (std::string(options_keys[i]) == std::string("number-of-threads"))
@@ -229,17 +229,17 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
         {
             unsigned int numberOfThreads = armnn::numeric_cast<unsigned int>(atoi(options_values[i]));
             armnn::BackendOptions modelOption("CpuAcc",
                                               {{"NumberOfThreads", numberOfThreads}});
-            optimizerOptions.m_ModelOptions.push_back(modelOption);
+            optimizerOptions.AddModelOption(modelOption);
         }
         // Process reduce-fp32-to-fp16 option
         else if (std::string(options_keys[i]) == std::string("reduce-fp32-to-fp16"))
         {
-            optimizerOptions.m_ReduceFp32ToFp16 = armnn::stringUtils::StringToBool(options_values[i]);
+            optimizerOptions.SetReduceFp32ToFp16(armnn::stringUtils::StringToBool(options_values[i]));
         }
         // Process debug-data
         else if (std::string(options_keys[i]) == std::string("debug-data"))
         {
-            optimizerOptions.m_Debug = armnn::stringUtils::StringToBool(options_values[i]);
+            optimizerOptions.SetDebugEnabled(armnn::stringUtils::StringToBool(options_values[i]));
         }
         // Infer output-shape
         else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
         {
             armnn::BackendOptions backendOption("ShapeInferenceMethod",
@@ -248,7 +248,7 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
             {
                 { "InferAndValidate", armnn::stringUtils::StringToBool(options_values[i]) }
             });
-            optimizerOptions.m_ModelOptions.push_back(backendOption);
+            optimizerOptions.AddModelOption(backendOption);
         }
         // Allow expanded dims
         else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
         {
             armnn::BackendOptions backendOption("AllowExpandedDims",
@@ -257,18 +257,18 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
             {
                 { "AllowExpandedDims", armnn::stringUtils::StringToBool(options_values[i]) }
             });
-            optimizerOptions.m_ModelOptions.push_back(backendOption);
+            optimizerOptions.AddModelOption(backendOption);
         }
         // Process memory-import
         else if (std::string(options_keys[i]) == std::string("memory-import"))
         {
-            optimizerOptions.m_ImportEnabled = armnn::stringUtils::StringToBool(options_values[i]);
+            optimizerOptions.SetImportEnabled(armnn::stringUtils::StringToBool(options_values[i]));
         }
         // Process enable-internal-profiling
         else if (std::string(options_keys[i]) == std::string("enable-internal-profiling"))
         {
             internalProfilingState = *options_values[i] != '0';
-            optimizerOptions.m_ProfilingEnabled = internalProfilingState;
+            optimizerOptions.SetProfilingEnabled(internalProfilingState);
         }
         // Process internal-profiling-detail
         else if (std::string(options_keys[i]) == std::string("internal-profiling-detail"))
         {
@@ -312,7 +312,8 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
         // Process file-only-external-profiling
         else if (std::string(options_keys[i]) == std::string("file-only-external-profiling"))
         {
-            runtimeOptions.m_ProfilingOptions.m_FileOnly = armnn::stringUtils::StringToBool(options_values[i]);
+            runtimeOptions.m_ProfilingOptions.m_FileOnly =
+                armnn::stringUtils::StringToBool(options_values[i]);
         }
         // Process counter-capture-period
         else if (std::string(options_keys[i]) == std::string("counter-capture-period"))
         {
@@ -408,12 +409,12 @@ bool DelegateOptions::IsLoggingEnabled()
     return p_DelegateOptionsImpl->m_LoggingSeverity.has_value();
 }
 
-const armnn::OptimizerOptions& DelegateOptions::GetOptimizerOptions() const
+const armnn::OptimizerOptionsOpaque& DelegateOptions::GetOptimizerOptions() const
 {
     return p_DelegateOptionsImpl->p_OptimizerOptions;
 }
 
-void DelegateOptions::SetOptimizerOptions(const armnn::OptimizerOptions& optimizerOptions)
+void DelegateOptions::SetOptimizerOptions(const armnn::OptimizerOptionsOpaque& optimizerOptions)
 {
     p_DelegateOptionsImpl->p_OptimizerOptions = optimizerOptions;
 }
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index cfaea01bbc..ee1a4ed211 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -400,7 +400,7 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
     DelegateData delegateData(delegate->m_Options.GetBackends());
 
     // Build ArmNN Network
-    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
+    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
     armnn::NetworkId networkId;
     delegateData.m_Network = armnn::INetwork::Create(networkOptions);
 
@@ -490,11 +490,11 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
     armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
     armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
     // There's a bit of an assumption here that the delegate will only support Malloc memory source.
-    if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
+    if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
     {
         inputSource = armnn::MemorySource::Malloc;
     }
-    if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
+    if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
     {
         outputSource = armnn::MemorySource::Malloc;
     }
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
index d84d420977..fd1ef88645 100644
--- a/delegate/test/DelegateOptionsTest.cpp
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -26,7 +26,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
     std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
     // Enable ReduceFp32ToFp16
-    armnn::OptimizerOptions optimizerOptions(true, true, false, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(true, true, false, false);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
@@ -55,7 +55,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
     std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
     // Enable Debug
-    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
@@ -83,7 +83,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
     std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
     // Enable debug with debug callback function
-    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, true, false, false);
     bool callback = false;
     auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
     {
@@ -121,7 +121,7 @@ TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
     std::vector<uint8_t> divData = { 2, 2, 3, 4 };
     std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
 
-    armnn::OptimizerOptions optimizerOptions(false, false, false, true);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false, true);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
@@ -227,7 +227,8 @@ TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
     });
     modelOptions.push_back(cpuAcc);
 
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
+                                                   false, modelOptions, false);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
@@ -256,7 +257,8 @@ TEST_CASE ("ArmnnDelegateSerializeToDot")
     std::vector<float> divData = { 2, 2, 3, 4 };
     std::vector<float> expectedResult = { 1, 2, 2, 2 };
 
-    armnn::OptimizerOptions optimizerOptions(false, false, false, false);
+    armnn::OptimizerOptionsOpaque optimizerOptions(false, false,
+                                                   false, false);
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     // Enable serialize to dot by specifying the target file name.
     delegateOptions.SetSerializeToDot(filename);
@@ -299,7 +301,8 @@ void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
         options_values.get()[i] = values[i].c_str();
     }
 
-    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(),
+                                                   num_options, nullptr);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                               tensorShape,
                               inputData,
-- 
cgit v1.2.1
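
Note on the pattern: the patch above replaces direct access to OptimizerOptions data members
(m_ModelOptions, m_ImportEnabled, m_ExportEnabled, ...) with accessor calls on
OptimizerOptionsOpaque (GetModelOptions(), GetImportEnabled(), SetImportEnabled(),
AddModelOption(), ...), mirroring how DelegateOptions itself already keeps its state behind
p_DelegateOptionsImpl. The following is a minimal standalone sketch of that Pimpl
(pointer-to-implementation) idiom under simplified, hypothetical names -- Options and Impl are
illustrative placeholders, not the actual ArmNN classes:

    // Public header: no private data members are exposed, only a pointer
    // to a forward-declared implementation struct.
    #include <memory>
    #include <string>
    #include <vector>

    class Options
    {
    public:
        Options();
        ~Options();                              // defined out of line, where Impl is complete
        Options(const Options& other);           // deep-copies the hidden state
        Options& operator=(const Options& other);

        void AddBackend(const std::string& backend);
        const std::vector<std::string>& GetBackends() const;

    private:
        struct Impl;                             // forward declaration only
        std::unique_ptr<Impl> p_Impl;
    };

    // Implementation file: the hidden state lives here and can change
    // freely without touching the public header.
    struct Options::Impl
    {
        std::vector<std::string> m_Backends;
    };

    Options::Options() : p_Impl(std::make_unique<Impl>()) {}
    Options::~Options() = default;
    Options::Options(const Options& other) : p_Impl(std::make_unique<Impl>(*other.p_Impl)) {}
    Options& Options::operator=(const Options& other)
    {
        *p_Impl = *other.p_Impl;                 // preserve value semantics
        return *this;
    }

    void Options::AddBackend(const std::string& backend) { p_Impl->m_Backends.push_back(backend); }
    const std::vector<std::string>& Options::GetBackends() const { return p_Impl->m_Backends; }

Because callers only ever see accessors and a pointer-sized member, the implementation struct can
grow or reorder fields without changing the public class's size or layout, which is the usual
motivation for introducing an "opaque" options type like the one this patch migrates to. Note the
destructor and copy operations are declared in the header but defined after Impl's definition, as
std::unique_ptr<Impl> requires a complete type at the point of destruction.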