author    John Mcloughlin <john.mcloughlin@arm.com>    2023-03-24 12:07:25 +0000
committer Francis Murtagh <francis.murtagh@arm.com>    2023-04-12 18:28:23 +0100
commit    c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 (patch)
tree      931f1403589c34fd2de6b94d95e9e172a92424fe /tests/InferenceModel.hpp
parent    ca5c82af9269e7fd7ed17c7df9780a75fdaa733e (diff)
download  armnn-c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09.tar.gz
IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions
Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
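For context, the Pimpl (pointer-to-implementation) idiom named in the commit message hides a class's data members behind an opaque pointer so that the public header exposes only accessor functions, which is why the diff below replaces direct member access on armnn::OptimizerOptions with setter calls on armnn::OptimizerOptionsOpaque. The following is a minimal sketch of the pattern only; the member layout, method set, and file names are illustrative assumptions, not Arm NN's actual OptimizerOptionsOpaque implementation.

// Options.hpp -- public header: no data members are visible, so the
// implementation can change without breaking ABI for clients of this header.
#include <memory>

class OptimizerOptionsOpaque
{
public:
    OptimizerOptionsOpaque();
    ~OptimizerOptionsOpaque();               // defined in the .cpp, where Impl is complete

    void SetReduceFp32ToFp16(bool enabled);
    bool GetReduceFp32ToFp16() const;

private:
    struct Impl;                             // forward-declared implementation type
    std::unique_ptr<Impl> m_Impl;            // all state lives behind this pointer
};

// Options.cpp -- the implementation detail is hidden from clients.
struct OptimizerOptionsOpaque::Impl
{
    bool m_ReduceFp32ToFp16 = false;         // hypothetical stored option
};

OptimizerOptionsOpaque::OptimizerOptionsOpaque() : m_Impl(std::make_unique<Impl>()) {}
OptimizerOptionsOpaque::~OptimizerOptionsOpaque() = default;

void OptimizerOptionsOpaque::SetReduceFp32ToFp16(bool enabled) { m_Impl->m_ReduceFp32ToFp16 = enabled; }
bool OptimizerOptionsOpaque::GetReduceFp32ToFp16() const       { return m_Impl->m_ReduceFp32ToFp16; }

Note that the destructor (and any move operations) must be defined in the source file, where Impl is a complete type, because std::unique_ptr cannot destroy an incomplete type.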
Diffstat (limited to 'tests/InferenceModel.hpp')
-rw-r--r-- tests/InferenceModel.hpp | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index fa1b1b01b6..c053a4429a 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -455,13 +455,13 @@ public:
ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
- armnn::OptimizerOptions options;
- options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
- options.m_Debug = params.m_PrintIntermediateLayers;
- options.m_DebugToFile = params.m_PrintIntermediateLayersToFile;
- options.m_shapeInferenceMethod = params.m_InferOutputShape ?
- armnn::ShapeInferenceMethod::InferAndValidate : armnn::ShapeInferenceMethod::ValidateOnly;
- options.m_ProfilingEnabled = m_EnableProfiling;
+ armnn::OptimizerOptionsOpaque options;
+ options.SetReduceFp32ToFp16(params.m_EnableFp16TurboMode);
+ options.SetDebugEnabled(params.m_PrintIntermediateLayers);
+ options.SetDebugToFileEnabled(params.m_PrintIntermediateLayersToFile);
+ options.SetShapeInferenceMethod(params.m_InferOutputShape ?
+ armnn::ShapeInferenceMethod::InferAndValidate : armnn::ShapeInferenceMethod::ValidateOnly);
+ options.SetProfilingEnabled(m_EnableProfiling);
armnn::BackendOptions gpuAcc("GpuAcc",
{
@@ -476,8 +476,8 @@ public:
{ "FastMathEnabled", params.m_EnableFastMath },
{ "NumberOfThreads", params.m_NumberOfThreads }
});
- options.m_ModelOptions.push_back(gpuAcc);
- options.m_ModelOptions.push_back(cpuAcc);
+ options.AddModelOption(gpuAcc);
+ options.AddModelOption(cpuAcc);
const auto optimization_start_time = armnn::GetTimeNow();
optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);