author     John Mcloughlin <john.mcloughlin@arm.com>   2023-03-24 12:07:25 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>   2023-04-12 18:28:23 +0100
commit     c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 (patch)
tree       931f1403589c34fd2de6b94d95e9e172a92424fe /tests
parent     ca5c82af9269e7fd7ed17c7df9780a75fdaa733e (diff)
download   armnn-c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09.tar.gz
IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions
Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ArmNNExecutor.cpp                  26
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.cpp           28
-rw-r--r--  tests/InferenceModel.hpp                                18
-rw-r--r--  tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp    6
4 files changed, 39 insertions, 39 deletions
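
For orientation, the patch swaps direct writes to the public members of armnn::OptimizerOptions for the setter interface of the new Pimpl-backed armnn::OptimizerOptionsOpaque. A minimal sketch of the before/after pattern, using only members and setters that appear in this diff (the armnn/INetwork.hpp header is an assumption):

    #include <armnn/INetwork.hpp>  // header assumed to declare both option types

    // Before: options were plain public data members.
    armnn::OptimizerOptions oldOptions;
    oldOptions.m_ReduceFp32ToFp16 = true;
    oldOptions.m_Debug = false;

    // After: the opaque class hides its state behind setters.
    armnn::OptimizerOptionsOpaque options;
    options.SetReduceFp32ToFp16(true);
    options.SetDebugEnabled(false);
    options.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);
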
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 29ef4c5186..ac857a90df 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -518,15 +518,15 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw
{
armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
- armnn::OptimizerOptions options;
- options.m_ReduceFp32ToFp16 = m_Params.m_EnableFp16TurboMode;
- options.m_Debug = m_Params.m_PrintIntermediate;
- options.m_DebugToFile = m_Params.m_PrintIntermediateOutputsToFile;
- options.m_shapeInferenceMethod = m_Params.m_InferOutputShape ?
- armnn::ShapeInferenceMethod::InferAndValidate :
- armnn::ShapeInferenceMethod::ValidateOnly;
- options.m_ProfilingEnabled = m_Params.m_EnableProfiling;
- options.m_AllowExpandedDims = m_Params.m_AllowExpandedDims;
+ armnn::OptimizerOptionsOpaque options;
+ options.SetReduceFp32ToFp16(m_Params.m_EnableFp16TurboMode);
+ options.SetDebugEnabled(m_Params.m_PrintIntermediate);
+ options.SetDebugToFileEnabled(m_Params.m_PrintIntermediateOutputsToFile);
+ options.SetShapeInferenceMethod(m_Params.m_InferOutputShape ?
+ armnn::ShapeInferenceMethod::InferAndValidate :
+ armnn::ShapeInferenceMethod::ValidateOnly);
+ options.SetProfilingEnabled(m_Params.m_EnableProfiling);
+ options.SetAllowExpandedDims(m_Params.m_AllowExpandedDims);
armnn::BackendOptions gpuAcc("GpuAcc",
{
@@ -541,8 +541,8 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw
{ "FastMathEnabled", m_Params.m_EnableFastMath },
{ "NumberOfThreads", m_Params.m_NumberOfThreads }
});
- options.m_ModelOptions.push_back(gpuAcc);
- options.m_ModelOptions.push_back(cpuAcc);
+ options.AddModelOption(gpuAcc);
+ options.AddModelOption(cpuAcc);
// The shapeInferenceMethod and allowExpandedDims values have to be added to the model options
// because these are what are passed to the OptimizeSubgraphViews method and are used to create
// the new optimized INetwork that method uses
@@ -550,12 +550,12 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw
{
{ "AllowExpandedDims", m_Params.m_AllowExpandedDims }
});
- options.m_ModelOptions.push_back(allowExDimOpt);
+ options.AddModelOption(allowExDimOpt);
armnn::BackendOptions shapeInferOpt("ShapeInferenceMethod",
{
{ "InferAndValidate", m_Params.m_InferOutputShape }
});
- options.m_ModelOptions.push_back(shapeInferOpt);
+ options.AddModelOption(shapeInferOpt);
const auto optimization_start_time = armnn::GetTimeNow();
optNet = armnn::Optimize(*network, m_Params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
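
As this hunk shows, per-backend and network-level options are now attached with AddModelOption() instead of being pushed onto the public m_ModelOptions vector. A hedged sketch of that pattern, reusing only names visible above:

    armnn::OptimizerOptionsOpaque options;
    armnn::BackendOptions cpuAcc("CpuAcc",
    {
        { "FastMathEnabled", true },
        { "NumberOfThreads", 4u }
    });
    options.AddModelOption(cpuAcc);  // previously: options.m_ModelOptions.push_back(cpuAcc);
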
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index fbfd1bc936..3628fa4976 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -137,33 +137,33 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
}
// Optimizer options next.
- armnn::OptimizerOptions optimizerOptions;
- optimizerOptions.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
- optimizerOptions.m_Debug = m_PrintIntermediate;
- optimizerOptions.m_DebugToFile = m_PrintIntermediateOutputsToFile;
- optimizerOptions.m_ProfilingEnabled = m_EnableProfiling;
- optimizerOptions.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
+ optimizerOptions.SetReduceFp32ToFp16(m_EnableFp16TurboMode);
+ optimizerOptions.SetDebugEnabled(m_PrintIntermediate);
+ optimizerOptions.SetDebugToFileEnabled(m_PrintIntermediateOutputsToFile);
+ optimizerOptions.SetProfilingEnabled(m_EnableProfiling);
+ optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);
if (m_InferOutputShape)
{
- optimizerOptions.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;
+ optimizerOptions.SetShapeInferenceMethod(armnn::ShapeInferenceMethod::InferAndValidate);
armnn::BackendOptions networkOption("ShapeInferenceMethod",
{
{"InferAndValidate", true}
});
- optimizerOptions.m_ModelOptions.push_back(networkOption);
+ optimizerOptions.AddModelOption(networkOption);
}
{
armnn::BackendOptions option("GpuAcc", {{"FastMathEnabled", m_EnableFastMath}});
- optimizerOptions.m_ModelOptions.push_back(option);
+ optimizerOptions.AddModelOption(option);
}
{
armnn::BackendOptions option("GpuAcc", {{"CachedNetworkFilePath", m_CachedNetworkFilePath}});
- optimizerOptions.m_ModelOptions.push_back(option);
+ optimizerOptions.AddModelOption(option);
}
{
armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", m_MLGOTuningFilePath}});
- optimizerOptions.m_ModelOptions.push_back(option);
+ optimizerOptions.AddModelOption(option);
}
armnn::BackendOptions cpuAcc("CpuAcc",
@@ -171,14 +171,14 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
{ "FastMathEnabled", m_EnableFastMath },
{ "NumberOfThreads", m_NumberOfThreads }
});
- optimizerOptions.m_ModelOptions.push_back(cpuAcc);
+ optimizerOptions.AddModelOption(cpuAcc);
if (m_AllowExpandedDims)
{
armnn::BackendOptions networkOption("AllowExpandedDims",
{
{"AllowExpandedDims", true}
});
- optimizerOptions.m_ModelOptions.push_back(networkOption);
+ optimizerOptions.AddModelOption(networkOption);
}
delegateOptions.SetOptimizerOptions(optimizerOptions);
return delegateOptions;
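
The unchanged SetOptimizerOptions(optimizerOptions) call above implies that armnnDelegate::DelegateOptions now accepts the opaque type directly. A rough sketch of that flow (the DelegateOptions constructor taking a compute device is an assumption, not shown in this patch):

    armnn::OptimizerOptionsOpaque optimizerOptions;
    optimizerOptions.SetReduceFp32ToFp16(true);
    optimizerOptions.SetProfilingEnabled(false);

    armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuAcc);  // constructor assumed
    delegateOptions.SetOptimizerOptions(optimizerOptions);  // overload taking OptimizerOptionsOpaque
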
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index fa1b1b01b6..c053a4429a 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -455,13 +455,13 @@ public:
ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
- armnn::OptimizerOptions options;
- options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
- options.m_Debug = params.m_PrintIntermediateLayers;
- options.m_DebugToFile = params.m_PrintIntermediateLayersToFile;
- options.m_shapeInferenceMethod = params.m_InferOutputShape ?
- armnn::ShapeInferenceMethod::InferAndValidate : armnn::ShapeInferenceMethod::ValidateOnly;
- options.m_ProfilingEnabled = m_EnableProfiling;
+ armnn::OptimizerOptionsOpaque options;
+ options.SetReduceFp32ToFp16(params.m_EnableFp16TurboMode);
+ options.SetDebugEnabled(params.m_PrintIntermediateLayers);
+ options.SetDebugToFileEnabled(params.m_PrintIntermediateLayersToFile);
+ options.SetShapeInferenceMethod(params.m_InferOutputShape ?
+ armnn::ShapeInferenceMethod::InferAndValidate : armnn::ShapeInferenceMethod::ValidateOnly);
+ options.SetProfilingEnabled(m_EnableProfiling);
armnn::BackendOptions gpuAcc("GpuAcc",
{
@@ -476,8 +476,8 @@ public:
{ "FastMathEnabled", params.m_EnableFastMath },
{ "NumberOfThreads", params.m_NumberOfThreads }
});
- options.m_ModelOptions.push_back(gpuAcc);
- options.m_ModelOptions.push_back(cpuAcc);
+ options.AddModelOption(gpuAcc);
+ options.AddModelOption(cpuAcc);
const auto optimization_start_time = armnn::GetTimeNow();
optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
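
Both executors finish by handing the opaque options to armnn::Optimize, whose call site is unchanged by the patch. A minimal sketch of that call, assuming a populated INetwork and a created IRuntime:

    // network : armnn::INetworkPtr, already built
    // runtime : armnn::IRuntimePtr, already created
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    armnn::OptimizerOptionsOpaque options;
    options.SetProfilingEnabled(false);

    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), options);
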
diff --git a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
index 75bc9a3244..3ecd160b48 100644
--- a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
+++ b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
@@ -128,8 +128,8 @@ int LoadModel(const char* filename,
ARMNN_LOG(debug) << "Model loaded ok: " << filename;
// Optimize backbone model
- OptimizerOptions options;
- options.m_ImportEnabled = enableImport != ImportMemory::False;
+ OptimizerOptionsOpaque options;
+ options.SetImportEnabled(enableImport != ImportMemory::False);
auto optimizedModel = Optimize(*model, backendPreferences, runtime.GetDeviceSpec(), options);
if (!optimizedModel)
{
@@ -149,7 +149,7 @@ int LoadModel(const char* filename,
{
std::string errorMessage;
- armnn::MemorySource memSource = options.m_ImportEnabled ? armnn::MemorySource::Malloc
+ armnn::MemorySource memSource = options.GetImportEnabled() ? armnn::MemorySource::Malloc
: armnn::MemorySource::Undefined;
INetworkProperties modelProps(false, memSource, memSource);
Status status = runtime.LoadNetwork(networkId, std::move(optimizedModel), errorMessage, modelProps);
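
The last hunk reads the import setting back through a getter rather than the removed m_ImportEnabled member. A small sketch of that round trip, using only the accessors shown above:

    armnn::OptimizerOptionsOpaque options;
    options.SetImportEnabled(true);

    armnn::MemorySource memSource = options.GetImportEnabled() ? armnn::MemorySource::Malloc
                                                                : armnn::MemorySource::Undefined;
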