about summary refs log tree commit diff
diff options
context:
space:
mode:
authorColm Donelan <colm.donelan@arm.com>2022-12-20 16:21:35 +0000
committerColm Donelan <colm.donelan@arm.com>2022-12-20 16:21:35 +0000
commitda7f2f947a750d8b377cd6bc180a838909f97356 (patch)
treecab18d41810deef8f19677c12cfe7f2ef29141f7
parentcb223b7f485a33242fb9b18bc404bb33c29107e4 (diff)
downloadarmnn-da7f2f947a750d8b377cd6bc180a838909f97356.tar.gz
IVGCVSW-7409 GPU backend options not being passed from the delegate.
Two problems here: (1) the Delegate was using the parameter `options` after `std::move` had been executed on it; (2) in ExecuteNetworkParams, 3 GPU backend options were instead being set as optimizer options. Signed-off-by: Colm Donelan <colm.donelan@arm.com> Change-Id: I61c7fad8a5819a0a4aec0243899019a342c5cc5f
-rw-r--r--delegate/src/armnn_delegate.cpp7
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkParams.cpp75
2 files changed, 46 insertions, 36 deletions
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 4d95522dbd..06affca752 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -137,13 +137,12 @@ Delegate::Delegate(armnnDelegate::DelegateOptions options)
m_Options(std::move(options))
{
// Configures logging for ARMNN
- if (options.IsLoggingEnabled())
+ if (m_Options.IsLoggingEnabled())
{
- armnn::ConfigureLogging(true, true, options.GetLoggingSeverity());
+ armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
}
-
// Create ArmNN Runtime
- m_Runtime = armnn::IRuntime::Create(options.GetRuntimeOptions());
+ m_Runtime = armnn::IRuntime::Create(m_Options.GetRuntimeOptions());
std::vector<armnn::BackendId> backends;
if (m_Runtime)
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index fa467c93f8..fbfd1bc936 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -120,56 +120,67 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
armnnDelegate::DelegateOptions delegateOptions(m_ComputeDevices);
delegateOptions.SetDynamicBackendsPath(m_DynamicBackendsPath);
delegateOptions.SetGpuProfilingState(m_EnableProfiling);
-
- armnn::OptimizerOptions options;
- options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
- options.m_Debug = m_PrintIntermediate;
- options.m_DebugToFile = m_PrintIntermediateOutputsToFile;
- options.m_ProfilingEnabled = m_EnableProfiling;
delegateOptions.SetInternalProfilingParams(m_EnableProfiling, armnn::ProfilingDetailsMethod::DetailsWithEvents);
- options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
+
+ // GPU Backend options first.
+ {
+ armnn::BackendOptions gpuOption("GpuAcc", {{"TuningLevel", m_TuningLevel}});
+ delegateOptions.AddBackendOption(gpuOption);
+ }
+ {
+ armnn::BackendOptions gpuOption("GpuAcc", {{"TuningFile", m_TuningPath.c_str()}});
+ delegateOptions.AddBackendOption(gpuOption);
+ }
+ {
+ armnn::BackendOptions gpuOption("GpuAcc", {{"KernelProfilingEnabled", m_EnableProfiling}});
+ delegateOptions.AddBackendOption(gpuOption);
+ }
+
+ // Optimizer options next.
+ armnn::OptimizerOptions optimizerOptions;
+ optimizerOptions.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
+ optimizerOptions.m_Debug = m_PrintIntermediate;
+ optimizerOptions.m_DebugToFile = m_PrintIntermediateOutputsToFile;
+ optimizerOptions.m_ProfilingEnabled = m_EnableProfiling;
+ optimizerOptions.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
if (m_InferOutputShape)
{
- options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;
+ optimizerOptions.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;
+ armnn::BackendOptions networkOption("ShapeInferenceMethod",
+ {
+ {"InferAndValidate", true}
+ });
+ optimizerOptions.m_ModelOptions.push_back(networkOption);
}
- armnn::BackendOptions gpuAcc("GpuAcc",
- {
- { "FastMathEnabled", m_EnableFastMath },
- { "SaveCachedNetwork", m_SaveCachedNetwork },
- { "CachedNetworkFilePath", m_CachedNetworkFilePath },
- { "TuningLevel", m_TuningLevel},
- { "TuningFile", m_TuningPath.c_str()},
- { "KernelProfilingEnabled", m_EnableProfiling},
- { "MLGOTuningFilePath", m_MLGOTuningFilePath}
- });
+ {
+ armnn::BackendOptions option("GpuAcc", {{"FastMathEnabled", m_EnableFastMath}});
+ optimizerOptions.m_ModelOptions.push_back(option);
+ }
+ {
+ armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath", m_CachedNetworkFilePath}});
+ optimizerOptions.m_ModelOptions.push_back(option);
+ }
+ {
+ armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", m_MLGOTuningFilePath}});
+ optimizerOptions.m_ModelOptions.push_back(option);
+ }
armnn::BackendOptions cpuAcc("CpuAcc",
{
{ "FastMathEnabled", m_EnableFastMath },
{ "NumberOfThreads", m_NumberOfThreads }
});
- options.m_ModelOptions.push_back(gpuAcc);
- options.m_ModelOptions.push_back(cpuAcc);
-
- if (m_InferOutputShape)
- {
- armnn::BackendOptions networkOption("ShapeInferenceMethod",
- {
- {"InferAndValidate", true}
- });
- options.m_ModelOptions.push_back(networkOption);
- }
+ optimizerOptions.m_ModelOptions.push_back(cpuAcc);
if (m_AllowExpandedDims)
{
armnn::BackendOptions networkOption("AllowExpandedDims",
{
{"AllowExpandedDims", true}
});
- options.m_ModelOptions.push_back(networkOption);
+ optimizerOptions.m_ModelOptions.push_back(networkOption);
}
- delegateOptions.SetOptimizerOptions(options);
-
+ delegateOptions.SetOptimizerOptions(optimizerOptions);
return delegateOptions;
}