author     John Mcloughlin <john.mcloughlin@arm.com>    2023-03-24 12:07:25 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>    2023-04-12 18:28:23 +0100
commit     c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 (patch)
tree       931f1403589c34fd2de6b94d95e9e172a92424fe /samples/ObjectDetection
parent     ca5c82af9269e7fd7ed17c7df9780a75fdaa733e (diff)
download   armnn-c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09.tar.gz
IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions
Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
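
The sketch below illustrates the Pimpl (pointer-to-implementation) pattern this commit applies to the optimizer options: public data members such as m_ModelOptions and m_ReduceFp32ToFp16 are replaced by an opaque class whose state sits behind a private pointer and is modified through setters. Only the class and method names (OptimizerOptionsOpaque, AddModelOption, SetReduceFp32ToFp16) mirror the calls in the diff below; the Impl struct, its members, and the surrounding code are illustrative assumptions, not ArmNN's actual implementation.

    // Minimal Pimpl sketch. Impl and its members are illustrative only;
    // the public method names mirror the calls used in the diff below.
    #include <memory>
    #include <vector>

    struct BackendOptions {};              // stand-in for armnn::BackendOptions

    class OptimizerOptionsOpaque
    {
    public:
        OptimizerOptionsOpaque();
        ~OptimizerOptionsOpaque();

        // Setters replace the old public data members, so callers no longer
        // depend on the layout of the options struct.
        void AddModelOption(const BackendOptions& option);
        void SetReduceFp32ToFp16(bool reduce);

    private:
        struct Impl;                       // defined only in the .cpp file
        std::unique_ptr<Impl> m_Impl;      // opaque pointer hides the layout
    };

    // Normally placed in the .cpp file, hidden from client code:
    struct OptimizerOptionsOpaque::Impl
    {
        std::vector<BackendOptions> m_ModelOptions;
        bool m_ReduceFp32ToFp16 = false;
    };

    OptimizerOptionsOpaque::OptimizerOptionsOpaque()
        : m_Impl(std::make_unique<Impl>()) {}
    OptimizerOptionsOpaque::~OptimizerOptionsOpaque() = default;

    void OptimizerOptionsOpaque::AddModelOption(const BackendOptions& option)
    {
        m_Impl->m_ModelOptions.push_back(option);
    }

    void OptimizerOptionsOpaque::SetReduceFp32ToFp16(bool reduce)
    {
        m_Impl->m_ReduceFp32ToFp16 = reduce;
    }

Because client code only ever holds the opaque type, fields can later be added to the hidden implementation without breaking the ABI of code built against older headers, which is why the sample switches from writing the members directly to calling the setters.
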
Diffstat (limited to 'samples/ObjectDetection')
-rw-r--r--  samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp b/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp
index c8875a27dc..557ec8a8ac 100644
--- a/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp
+++ b/samples/ObjectDetection/include/delegate/ArmnnNetworkExecutor.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -104,7 +104,7 @@ ArmnnNetworkExecutor<Tout>::ArmnnNetworkExecutor(std::string& modelPath,
m_profiling(isProfilingEnabled)
{
m_profiling.ProfilingStart();
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
m_model = tflite::FlatBufferModel::BuildFromFile(modelPath.c_str());
if (m_model == nullptr)
{
@@ -130,12 +130,12 @@ ArmnnNetworkExecutor<Tout>::ArmnnNetworkExecutor(std::string& modelPath,
/* enable fast math optimization */
armnn::BackendOptions modelOptionGpu("GpuAcc", {{"FastMathEnabled", true}});
- optimizerOptions.m_ModelOptions.push_back(modelOptionGpu);
+ optimizerOptions.AddModelOption(modelOptionGpu);
armnn::BackendOptions modelOptionCpu("CpuAcc", {{"FastMathEnabled", true}});
- optimizerOptions.m_ModelOptions.push_back(modelOptionCpu);
+ optimizerOptions.AddModelOption(modelOptionCpu);
/* enable reduce float32 to float16 optimization */
- optimizerOptions.m_ReduceFp32ToFp16 = true;
+ optimizerOptions.SetReduceFp32ToFp16(true);
armnnDelegate::DelegateOptions delegateOptions(preferredBackends, optimizerOptions);