author      John Mcloughlin <john.mcloughlin@arm.com>  2023-03-24 12:07:25 +0000
committer   Francis Murtagh <francis.murtagh@arm.com>  2023-04-12 18:28:23 +0100
commit      c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 (patch)
tree        931f1403589c34fd2de6b94d95e9e172a92424fe /src
parent      ca5c82af9269e7fd7ed17c7df9780a75fdaa733e (diff)
download    armnn-c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09.tar.gz
IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions
Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
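
For readers unfamiliar with the pattern named in the subject line: the Pimpl (pointer-to-implementation) idiom moves a class's data members into a separate implementation struct that the public class owns through an opaque pointer, so those members can change without altering the public class layout or ABI. A minimal sketch of the idiom follows; the Widget/Impl names are illustrative only and are not taken from this patch.

    // widget.hpp -- public header exposes no data members
    #include <memory>

    class Widget
    {
    public:
        Widget();
        ~Widget();                      // defined where Impl is a complete type
        void SetValue(int value);
        int  GetValue() const;
    private:
        struct Impl;                    // forward declaration only
        std::unique_ptr<Impl> m_Impl;   // opaque pointer to the real state
    };

    // widget.cpp -- implementation detail hidden from clients
    struct Widget::Impl
    {
        int m_Value = 0;
    };

    Widget::Widget() : m_Impl(std::make_unique<Impl>()) {}
    Widget::~Widget() = default;
    void Widget::SetValue(int value) { m_Impl->m_Value = value; }
    int  Widget::GetValue() const    { return m_Impl->m_Value; }

This commit applies the same structure to OptimizerOptions: OptimizerOptionsOpaque holds a unique_ptr to OptimizerOptionsOpaqueImpl and exposes getters and setters instead of public members.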
Diffstat (limited to 'src')
-rw-r--r--  src/armnn/Network.cpp                                       246
-rw-r--r--  src/armnn/Network.hpp                                        88
-rw-r--r--  src/armnn/test/DebugCallbackTest.cpp                          4
-rw-r--r--  src/armnn/test/EndToEndTest.cpp                               4
-rw-r--r--  src/armnn/test/FlowControl.cpp                                4
-rw-r--r--  src/armnn/test/RuntimeTests.cpp                              26
-rw-r--r--  src/armnnTestUtils/CreateWorkload.hpp                         2
-rw-r--r--  src/backends/backendsCommon/test/EndToEndTestImpl.hpp       36
-rw-r--r--  src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp     6
-rw-r--r--  src/backends/backendsCommon/test/OptimizedNetworkTests.cpp   8
-rw-r--r--  src/backends/cl/test/ClContextSerializerTests.cpp           10
-rw-r--r--  src/backends/cl/test/ClCustomAllocatorTests.cpp             12
-rw-r--r--  src/backends/cl/test/ClFallbackTests.cpp                    16
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleTests.cpp          44
-rw-r--r--  src/backends/cl/test/ClOptimizedNetworkTests.cpp            10
-rw-r--r--  src/backends/neon/test/NeonFallbackTests.cpp                40
-rw-r--r--  src/backends/neon/test/NeonOptimizedNetworkTests.cpp        15
-rw-r--r--  src/backends/reference/test/RefOptimizedNetworkTests.cpp     4
18 files changed, 441 insertions, 134 deletions
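
The test updates in the hunks below show the call-site impact of the change: code that previously wrote OptimizerOptions members directly now goes through the opaque wrapper's setters, while the legacy Optimize() overload is kept and forwards to the new one. A condensed before/after sketch assembled from those hunks; the surrounding network, backend list, and runtime setup (net, backends, runtime) is assumed from the tests and not shown here.

    // Before: public struct members written directly
    armnn::OptimizerOptions legacyOptions;
    legacyOptions.m_ImportEnabled = true;
    legacyOptions.m_ExportEnabled = true;

    // After: Pimpl-backed wrapper accessed through setters
    armnn::OptimizerOptionsOpaque opaqueOptions;
    opaqueOptions.SetImportEnabled(true);
    opaqueOptions.SetExportEnabled(true);
    opaqueOptions.AddModelOption(armnn::BackendOptions("CpuAcc", {{"FastMathEnabled", true}}));

    // Either overload of Optimize() works; the legacy OptimizerOptions overload
    // simply forwards to the OptimizerOptionsOpaque one.
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*net, backends, runtime->GetDeviceSpec(), opaqueOptions);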
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 4b89daf977..a069585216 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -45,6 +45,194 @@ INetwork::INetwork(NetworkOptions networkOptions) : pNetworkImpl(new NetworkImpl
INetwork::~INetwork() = default;
+OptimizerOptionsOpaque::OptimizerOptionsOpaque()
+ : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>())
+{
+}
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(OptimizerOptionsOpaque const &other)
+ : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(*other.p_OptimizerOptionsImpl))
+{
+}
+
+OptimizerOptionsOpaque::~OptimizerOptionsOpaque() = default;
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+ bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
+ bool debugToFile)
+ : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(reduceFp32ToFp16, debug, reduceFp32ToBf16,
+ importEnabled, modelOptions,
+ exportEnabled, debugToFile))
+{
+}
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+ ShapeInferenceMethod shapeInferenceMethod,
+ bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
+ bool debugToFile, bool allowExpandedDims)
+ : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(reduceFp32ToFp16, debug, reduceFp32ToBf16,
+ shapeInferenceMethod, importEnabled,
+ modelOptions, exportEnabled,
+ debugToFile, allowExpandedDims))
+{
+}
+
+OptimizerOptionsOpaque::OptimizerOptionsOpaque(const OptimizerOptions& OptimizerStruct)
+ : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>())
+{
+ p_OptimizerOptionsImpl->m_ImportEnabled = OptimizerStruct.m_ImportEnabled;
+ p_OptimizerOptionsImpl->m_shapeInferenceMethod = OptimizerStruct.m_shapeInferenceMethod;
+ p_OptimizerOptionsImpl->m_ModelOptions = OptimizerStruct.m_ModelOptions;
+ p_OptimizerOptionsImpl->m_ProfilingEnabled = OptimizerStruct.m_ProfilingEnabled;
+ p_OptimizerOptionsImpl->m_DebugToFile = OptimizerStruct.m_DebugToFile;
+ p_OptimizerOptionsImpl->m_Debug = OptimizerStruct.m_Debug;
+ p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = OptimizerStruct.m_ReduceFp32ToFp16;
+ p_OptimizerOptionsImpl->m_ExportEnabled = OptimizerStruct.m_ExportEnabled;
+ p_OptimizerOptionsImpl->m_AllowExpandedDims = OptimizerStruct.m_AllowExpandedDims;
+ p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 = OptimizerStruct.m_ReduceFp32ToBf16;
+}
+
+OptimizerOptionsOpaque& OptimizerOptionsOpaque::operator= (OptimizerOptionsOpaque other)
+{
+ p_OptimizerOptionsImpl->m_ImportEnabled = other.GetImportEnabled();
+ p_OptimizerOptionsImpl->m_shapeInferenceMethod = other.GetShapeInferenceMethod();
+ p_OptimizerOptionsImpl->m_ModelOptions = other.GetModelOptions();
+ p_OptimizerOptionsImpl->m_ProfilingEnabled = other.GetProfilingEnabled();
+ p_OptimizerOptionsImpl->m_DebugToFile = other.GetDebugToFileEnabled();
+ p_OptimizerOptionsImpl->m_Debug = other.GetDebugEnabled();
+ p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = other.GetReduceFp32ToFp16();
+ p_OptimizerOptionsImpl->m_ExportEnabled = other.GetExportEnabled();
+ p_OptimizerOptionsImpl->m_AllowExpandedDims = other.GetAllowExpandedDims();
+ p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 = other.GetReduceFp32ToBf16();
+ return *this;
+}
+
+void OptimizerOptionsOpaque::SetImportEnabled(bool ImportState)
+{
+ p_OptimizerOptionsImpl->m_ImportEnabled = ImportState;
+}
+
+void OptimizerOptionsOpaque::SetExportEnabled(bool ExportState)
+{
+ p_OptimizerOptionsImpl->m_ExportEnabled = ExportState;
+}
+
+void OptimizerOptionsOpaque::SetProfilingEnabled(bool ProfilingState)
+{
+ p_OptimizerOptionsImpl->m_ProfilingEnabled = ProfilingState;
+}
+
+void OptimizerOptionsOpaque::SetDebugEnabled(bool DebugState)
+{
+ p_OptimizerOptionsImpl->m_Debug = DebugState;
+}
+
+void OptimizerOptionsOpaque::SetDebugToFileEnabled(bool DebugFileState)
+{
+ p_OptimizerOptionsImpl->m_DebugToFile = DebugFileState;
+}
+
+void OptimizerOptionsOpaque::SetReduceFp32ToFp16(bool ReduceFp32ToFp16State)
+{
+ p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 = ReduceFp32ToFp16State;
+}
+
+void OptimizerOptionsOpaque::SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType)
+{
+ p_OptimizerOptionsImpl->m_shapeInferenceMethod = ShapeInferenceMethodType;
+}
+
+void OptimizerOptionsOpaque::SetAllowExpandedDims(bool ExpandedDimsAllowed)
+{
+ p_OptimizerOptionsImpl->m_AllowExpandedDims = ExpandedDimsAllowed;
+}
+
+void OptimizerOptionsOpaque::AddModelOption(armnn::BackendOptions NewModelOption)
+{
+ p_OptimizerOptionsImpl->m_ModelOptions.push_back(NewModelOption);
+}
+
+bool OptimizerOptionsOpaque::GetProfilingEnabled() const
+{
+ return p_OptimizerOptionsImpl->m_ProfilingEnabled;
+};
+
+bool OptimizerOptionsOpaque::GetImportEnabled() const
+{
+ return p_OptimizerOptionsImpl->m_ImportEnabled;
+};
+
+bool OptimizerOptionsOpaque::GetExportEnabled() const
+{
+ return p_OptimizerOptionsImpl->m_ExportEnabled;
+};
+
+bool OptimizerOptionsOpaque::GetReduceFp32ToFp16() const
+{
+ return p_OptimizerOptionsImpl->m_ReduceFp32ToFp16;
+};
+
+bool OptimizerOptionsOpaque::GetReduceFp32ToBf16() const
+{
+ return p_OptimizerOptionsImpl->m_ReduceFp32ToBf16;
+}
+
+bool OptimizerOptionsOpaque::GetDebugEnabled() const
+{
+ return p_OptimizerOptionsImpl->m_Debug;
+}
+
+bool OptimizerOptionsOpaque::GetDebugToFileEnabled() const
+{
+ return p_OptimizerOptionsImpl->m_DebugToFile;
+}
+
+bool OptimizerOptionsOpaque::GetAllowExpandedDims() const
+{
+ return p_OptimizerOptionsImpl->m_AllowExpandedDims;
+}
+
+armnn::ModelOptions OptimizerOptionsOpaque::GetModelOptions() const
+{
+ return p_OptimizerOptionsImpl->m_ModelOptions;
+}
+
+armnn::ShapeInferenceMethod OptimizerOptionsOpaque::GetShapeInferenceMethod() const
+{
+ return p_OptimizerOptionsImpl->m_shapeInferenceMethod;
+}
+
+const std::string OptimizerOptionsOpaque::ToString() const
+{
+ std::stringstream stream;
+ stream << "OptimizerOptions: \n";
+ stream << "\tReduceFp32ToFp16: " << p_OptimizerOptionsImpl->m_ReduceFp32ToFp16 << "\n";
+ stream << "\tReduceFp32ToBf16: " << p_OptimizerOptionsImpl->m_ReduceFp32ToBf16 << "\n";
+ stream << "\tDebug: " << p_OptimizerOptionsImpl->m_Debug << "\n";
+ stream << "\tDebug to file: " << p_OptimizerOptionsImpl->m_DebugToFile << "\n";
+ stream << "\tShapeInferenceMethod: " <<
+ (p_OptimizerOptionsImpl->m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ?
+ "ValidateOnly" : "InferAndValidate") << "\n";
+ stream << "\tImportEnabled: " << p_OptimizerOptionsImpl->m_ImportEnabled << "\n";
+ stream << "\tExportEnabled: " << p_OptimizerOptionsImpl->m_ExportEnabled << "\n";
+ stream << "\tProfilingEnabled: " << p_OptimizerOptionsImpl->m_ProfilingEnabled << "\n";
+ stream << "\tAllowExpandedDims: " << p_OptimizerOptionsImpl->m_AllowExpandedDims << "\n";
+
+ stream << "\tModelOptions: \n";
+ for (auto optionsGroup : p_OptimizerOptionsImpl->m_ModelOptions)
+ {
+ for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
+ {
+ const armnn::BackendOptions::BackendOption option = optionsGroup.GetOption(i);
+ stream << "\t\tBackend: " << optionsGroup.GetBackendId() << "\n"
+ << "\t\t\tOption: " << option.GetName() << "\n"
+ << "\t\t\tValue: " << std::string(option.GetValue().ToString()) << "\n";
+ }
+ }
+
+ return stream.str();
+}
+
Status INetwork::PrintGraph()
{
return pNetworkImpl->PrintGraph();
@@ -1581,18 +1769,32 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
return result;
}
+// Forwarding function to remain backward compatible with legacy OptimizerOptions
IOptimizedNetworkPtr Optimize(const Graph& inGraph,
const std::vector<BackendId>& backendPreferences,
const IDeviceSpec& deviceSpec,
const OptimizerOptions& options,
Optional<std::vector<std::string>&> messages)
{
+ return Optimize(inGraph,
+ backendPreferences,
+ deviceSpec,
+ OptimizerOptionsOpaque(options),
+ messages);
+}
+
+IOptimizedNetworkPtr Optimize(const Graph& inGraph,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
+ const OptimizerOptionsOpaque& options,
+ Optional<std::vector<std::string>&> messages)
+{
ARMNN_LOG(debug) << options.ToString();
// Enable profiling
auto profiler = inGraph.GetProfiler();
ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
- profiler->EnableProfiling(options.m_ProfilingEnabled);
+ profiler->EnableProfiling(options.GetProfilingEnabled());
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer");
if (backendPreferences.empty())
@@ -1600,13 +1802,13 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
throw InvalidArgumentException("Invoked Optimize with no backends specified");
}
- if (options.m_ReduceFp32ToBf16)
+ if (options.GetReduceFp32ToBf16())
{
throw InvalidArgumentException("BFloat16 optimization is currently ignored. In order to use Bf16 optimization "
"Please use the FastMathEnabled backend option for CpuAcc or GpuAcc.");
}
- if (options.m_ReduceFp32ToFp16 && options.m_ReduceFp32ToBf16)
+ if (options.GetReduceFp32ToFp16() && options.GetReduceFp32ToBf16())
{
throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
}
@@ -1619,9 +1821,9 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
// We need to pass on the information about whether import and export is enabled to the LoadNetwork phase.
// The mechanism to do that is to add model options to the optimized network.
armnn::BackendOptions importExport("Global",
- {{"ImportEnabled", options.m_ImportEnabled},
- {"ExportEnabled", options.m_ExportEnabled}});
- ModelOptions optimizedOptions(options.m_ModelOptions);
+ {{"ImportEnabled", options.GetImportEnabled()},
+ {"ExportEnabled", options.GetExportEnabled()}});
+ ModelOptions optimizedOptions(options.GetModelOptions());
optimizedOptions.push_back(importExport);
auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
@@ -1632,7 +1834,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
// Get the optimized graph
Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
- if(options.m_shapeInferenceMethod == ShapeInferenceMethod::InferAndValidate)
+ if(options.GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
{
// Infer the tensor infos for all output slots. Throws an exception on failure
optGraph.InferTensorInfos();
@@ -1642,7 +1844,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
using namespace optimizations;
Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));
- if(options.m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
+ if(options.GetShapeInferenceMethod() == ShapeInferenceMethod::ValidateOnly)
{
// Validate the tensor infos for all output slots. Throws an exception on failure
optGraph.InferTensorInfos();
@@ -1677,8 +1879,8 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
FuseBatchNormIntoDepthwiseConvolution2DFloat32(),
FuseBatchNormIntoDepthwiseConvolution2DFloat16()));
- // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
- if (options.m_ReduceFp32ToFp16)
+
+ if (options.GetReduceFp32ToFp16())
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ReduceFp32ToFp16");
Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
@@ -1721,7 +1923,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr->pOptimizedNetworkImpl.get(),
backendSettings,
backends,
- options.m_ModelOptions,
+ options.GetModelOptions(),
messages);
if (backendOptimizationResult.m_Error)
{
@@ -1739,11 +1941,11 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
// This must occur after all topological changes to the graph and any redirection of variables
// If the debug flag is set, then insert a DebugLayer after each layer
// Doing this after applying the backend optimizations as they might have changed some layers
- if (options.m_Debug && !options.m_DebugToFile)
+ if (options.GetDebugEnabled() && !options.GetDebugToFileEnabled())
{
Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
}
- else if (options.m_DebugToFile)
+ else if (options.GetDebugToFileEnabled())
{
// Setup the output file path
try
@@ -1763,8 +1965,8 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
backends,
tensorHandleFactoryRegistry,
- options.m_ImportEnabled,
- options.m_ExportEnabled,
+ options.GetImportEnabled(),
+ options.GetExportEnabled(),
messages);
if (strategyResult.m_Error)
@@ -1782,12 +1984,26 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
return optNet;
}
+// Forwarding function to remain backward compatible with legacy OptimizerOptions
IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
const std::vector<BackendId>& backendPreferences,
const IDeviceSpec& deviceSpec,
const OptimizerOptions& options,
Optional<std::vector<std::string>&> messages)
{
+ return Optimize(inNetwork,
+ backendPreferences,
+ deviceSpec,
+ OptimizerOptionsOpaque(options),
+ messages);
+}
+
+IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
+ const OptimizerOptionsOpaque& options,
+ Optional<std::vector<std::string>&> messages)
+{
return Optimize(inNetwork.pNetworkImpl->GetGraph(),
backendPreferences,
deviceSpec,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c6bf0859f7..eced4587b9 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -23,6 +23,7 @@
namespace armnn
{
+
class Graph;
using NetworkImplPtr = std::unique_ptr<NetworkImpl, void (*)(NetworkImpl* network)>;
@@ -292,4 +293,91 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
SubgraphView::IConnectableLayerIterator& lastLayer,
Optional<std::vector<std::string>&> errMessages);
+struct OptimizerOptionsOpaqueImpl
+{
+ ~OptimizerOptionsOpaqueImpl() = default;
+
+ explicit OptimizerOptionsOpaqueImpl()
+ : m_ReduceFp32ToFp16(false)
+ , m_Debug(false)
+ , m_DebugToFile(false)
+ , m_ReduceFp32ToBf16(false)
+ , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+ , m_ImportEnabled(false)
+ , m_ModelOptions()
+ , m_ProfilingEnabled(false)
+ , m_ExportEnabled(false)
+ , m_AllowExpandedDims(false)
+ {
+ }
+
+ explicit OptimizerOptionsOpaqueImpl(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+ bool importEnabled, ModelOptions modelOptions = {},
+ bool exportEnabled = false, bool debugToFile = false)
+ : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+ , m_Debug(debug)
+ , m_DebugToFile(debugToFile)
+ , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+ , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+ , m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
+ , m_ProfilingEnabled(false)
+ , m_ExportEnabled(exportEnabled)
+ , m_AllowExpandedDims(false)
+ {
+ }
+
+ explicit OptimizerOptionsOpaqueImpl(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16,
+ ShapeInferenceMethod shapeInferenceMethod,
+ bool importEnabled, ModelOptions modelOptions, bool exportEnabled,
+ bool debugToFile, bool allowExpandedDims)
+ : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+ , m_Debug(debug)
+ , m_DebugToFile(debugToFile)
+ , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+ , m_shapeInferenceMethod(shapeInferenceMethod)
+ , m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
+ , m_ProfilingEnabled(false)
+ , m_ExportEnabled(exportEnabled)
+ , m_AllowExpandedDims(allowExpandedDims)
+ {
+ }
+
+ /// Reduces all Fp32 operators in the model to Fp16 for faster processing.
+ /// @Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers
+ /// between layers that weren't in Fp32 in the first place or if the operator is not supported in Fp16.
+ /// The overhead of these conversions can lead to a slower overall performance if too many conversions are
+ /// required.
+ bool m_ReduceFp32ToFp16 = false;
+
+ /// Add debug data for easier troubleshooting
+ bool m_Debug = false;
+
+ /// Pass debug data to separate output files for easier troubleshooting
+ bool m_DebugToFile = false;
+
+ /// @Note This feature has been replaced by enabling Fast Math in compute library backend options.
+ /// This is currently a placeholder option
+ bool m_ReduceFp32ToBf16 = false;
+
+ /// Infer output size when not available
+ ShapeInferenceMethod m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
+
+ /// Enable Import
+ bool m_ImportEnabled = false;
+
+ /// Enable Model Options
+ ModelOptions m_ModelOptions;
+
+ /// Enable profiling dump of the optimizer phase
+ bool m_ProfilingEnabled = false;
+
+ /// Enable Export
+ bool m_ExportEnabled = false;
+
+ /// When calculating tensor sizes, dimensions of size == 1 will be ignored
+ bool m_AllowExpandedDims = false;
+};
+
} // namespace armnn
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index 600447c53a..e5e79304be 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,7 +47,7 @@ TEST_CASE("RuntimeRegisterDebugCallback")
IRuntimePtr runtime(IRuntime::Create(options));
// Optimize the network with debug option
- OptimizerOptions optimizerOptions(false, true);
+ OptimizerOptionsOpaque optimizerOptions(false, true);
std::vector<BackendId> backends = { "CpuRef" };
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index 8a64a4b75f..17e46667ef 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,7 +46,7 @@ TEST_CASE("ErrorOnLoadNetwork")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const InvalidArgumentException&)
diff --git a/src/armnn/test/FlowControl.cpp b/src/armnn/test/FlowControl.cpp
index cdd86c06e4..563968a415 100644
--- a/src/armnn/test/FlowControl.cpp
+++ b/src/armnn/test/FlowControl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -55,7 +55,7 @@ TEST_CASE("ErrorOnLoadNetwork")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const InvalidArgumentException&)
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 427352aaac..67684448bb 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -543,7 +543,7 @@ TEST_CASE("IVGCVSW_1929_QuantizedSoftmaxIssue")
armnn::IOptimizedNetworkPtr optNet = Optimize(*net,
backends,
runtime->GetDeviceSpec(),
- OptimizerOptions(),
+ OptimizerOptionsOpaque(),
errMessages);
FAIL("An exception should have been thrown");
}
@@ -1327,10 +1327,10 @@ TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- OptimizerOptions optimizedOptions;
+ OptimizerOptionsOpaque optimizedOptions;
// Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = false;
- optimizedOptions.m_ExportEnabled = false;
+ optimizedOptions.SetImportEnabled(false);
+ optimizedOptions.SetExportEnabled(false);
IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -1372,10 +1372,10 @@ TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- OptimizerOptions optimizedOptions;
+ OptimizerOptionsOpaque optimizedOptions;
// Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = false;
- optimizedOptions.m_ExportEnabled = false;
+ optimizedOptions.SetImportEnabled(false);
+ optimizedOptions.SetExportEnabled(false);
IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -1417,10 +1417,10 @@ TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- OptimizerOptions optimizedOptions;
+ OptimizerOptionsOpaque optimizedOptions;
// Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = false;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(false);
IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -1462,10 +1462,10 @@ TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- OptimizerOptions optimizedOptions;
+ OptimizerOptionsOpaque optimizedOptions;
// Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = false;
- optimizedOptions.m_ExportEnabled = true;
+ optimizedOptions.SetImportEnabled(false);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 691adbff9d..5e11ab6258 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -2181,7 +2181,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
std::vector<armnn::BackendId> backends = {factory.GetBackendId()};
armnn::IRuntime::CreationOptions options;
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
optimizerOptions);
CHECK(optimizedNet != nullptr);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index 73cef16aad..bd5466ac04 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -210,8 +210,8 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -278,9 +278,9 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -353,9 +353,9 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
CHECK(optNet);
@@ -441,8 +441,8 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -531,8 +531,8 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -620,9 +620,9 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
INFO("Load Network");
@@ -714,9 +714,9 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optimizedOptions;
+ optimizedOptions.SetImportEnabled(true);
+ optimizedOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
// Loads it into the runtime.
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index 226e2b3364..c5f9869298 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -157,8 +157,8 @@ std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// optimize the network
- armnn::OptimizerOptions optOptions;
- optOptions.m_ProfilingEnabled = true;
+ armnn::OptimizerOptionsOpaque optOptions;
+ optOptions.SetProfilingEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
if(!optNet)
{
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 5e619df8dd..ce1eea4194 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -93,7 +93,7 @@ TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException&)
@@ -213,7 +213,8 @@ TEST_CASE("OptimizeValidateWorkloadsUndefinedComputeDevice")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(),
+ armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException&)
@@ -421,7 +422,8 @@ TEST_CASE("OptimizeNetworkCopy")
std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
armnn::ModelOptions modelOptions;
- armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+ armnn::OptimizerOptionsOpaque optimizerOptions(false, false, false,
+ false, modelOptions, false);
std::vector<std::string> errorMessages;
// optimize the network.
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 862ed2ecab..81a66145d9 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -88,14 +88,14 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextSerializerTest")
armnn::INetworkPtr net2 = CreateNetwork();
// Add specific optimizerOptions to each network.
- armnn::OptimizerOptions optimizerOptions1;
- armnn::OptimizerOptions optimizerOptions2;
+ armnn::OptimizerOptionsOpaque optimizerOptions1;
+ armnn::OptimizerOptionsOpaque optimizerOptions2;
armnn::BackendOptions modelOptions1("GpuAcc",
{{"SaveCachedNetwork", true}, {"CachedNetworkFilePath", filePathString}});
armnn::BackendOptions modelOptions2("GpuAcc",
{{"SaveCachedNetwork", false}, {"CachedNetworkFilePath", filePathString}});
- optimizerOptions1.m_ModelOptions.push_back(modelOptions1);
- optimizerOptions2.m_ModelOptions.push_back(modelOptions2);
+ optimizerOptions1.AddModelOption(modelOptions1);
+ optimizerOptions2.AddModelOption(modelOptions2);
armnn::IOptimizedNetworkPtr optNet1 = armnn::Optimize(
*net1, backends, runtime->GetDeviceSpec(), optimizerOptions1);
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98fcad..1cc2c4c95a 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -118,9 +118,9 @@ TEST_CASE("ClCustomAllocatorTest")
IRuntimePtr run = IRuntime::Create(options);
// Optimise ArmNN network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -188,8 +188,8 @@ TEST_CASE("ClCustomAllocatorCpuAccNegativeTest")
INetworkPtr myNetwork = CreateTestNetwork(inputTensorInfo);
// Optimise ArmNN network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
IOptimizedNetworkPtr optNet(nullptr, nullptr);
std::vector<std::string> errMessages;
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 9443116c92..acba449e18 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -48,9 +48,9 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -196,7 +196,7 @@ TEST_CASE("ClImportDisabledFallbackToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -329,9 +329,9 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -488,7 +488,7 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 1198cade61..39619e6421 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -140,9 +140,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
activation->GetOutputSlot(0).SetTensorInfo(tensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -337,9 +337,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -473,9 +473,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -621,9 +621,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -760,9 +760,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -912,9 +912,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -1138,9 +1138,9 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(false);
+ optOptions.SetExportEnabled(false);
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759a9a..3d4341df18 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -86,8 +86,8 @@ TEST_CASE("FP16TurboModeTestOnGpuAcc")
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- armnn::OptimizerOptions optimizerOptions;
- optimizerOptions.m_ReduceFp32ToFp16 = true;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
+ optimizerOptions.SetReduceFp32ToFp16(true);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
@@ -119,9 +119,9 @@ TEST_CASE("FastMathEnabledTestOnGpuAcc")
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
- optimizerOptions.m_ModelOptions.push_back(modelOptions);
+ optimizerOptions.AddModelOption(modelOptions);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 40df2dc315..eeb8107d49 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -58,9 +58,9 @@ TEST_CASE("FallbackImportToCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -202,9 +202,9 @@ TEST_CASE("FallbackPaddingCopyToCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -338,9 +338,9 @@ TEST_CASE("FallbackImportFromCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -483,9 +483,9 @@ TEST_CASE("FallbackPaddingCopyFromCpuAcc")
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -748,9 +748,9 @@ TEST_CASE("NeonImportEnabledFallbackToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -901,7 +901,7 @@ TEST_CASE("NeonImportDisabledFallbackToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -1040,9 +1040,9 @@ TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
- optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
+ OptimizerOptionsOpaque optOptions;
+ optOptions.SetImportEnabled(true);
+ optOptions.SetExportEnabled(true);
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -1204,7 +1204,7 @@ TEST_CASE("NeonImportDisableFallbackSubgraphToCl")
sub->BackendSelectionHint(backends[1]);
// optimize the network
- OptimizerOptions optOptions;
+ OptimizerOptionsOpaque optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index dcda9bfd07..4b700b034c 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -71,7 +71,8 @@ TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
try
{
- Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(),
+ armnn::OptimizerOptionsOpaque(), errMessages);
FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException& e)
@@ -95,9 +96,9 @@ TEST_CASE("FastMathEnabledTestOnCpuAcc")
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
- optimizerOptions.m_ModelOptions.push_back(modelOptions);
+ optimizerOptions.AddModelOption(modelOptions);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
@@ -127,16 +128,16 @@ TEST_CASE("NumberOfThreadsTestOnCpuAcc")
unsigned int numberOfThreads = 2;
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- armnn::OptimizerOptions optimizerOptions;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
armnn::BackendOptions modelOptions("CpuAcc", {{"NumberOfThreads", numberOfThreads}});
- optimizerOptions.m_ModelOptions.push_back(modelOptions);
+ optimizerOptions.AddModelOption(modelOptions);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
CHECK(optimizedNet);
std::unique_ptr<armnn::Graph> graphPtr;
- armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
+ armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.GetModelOptions());
auto modelOptionsOut = impl.GetModelOptions();
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 7e8064fc76..b4a135ffba 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -187,8 +187,8 @@ TEST_CASE("DebugTestOnCpuRef")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- armnn::OptimizerOptions optimizerOptions;
- optimizerOptions.m_Debug = true;
+ armnn::OptimizerOptionsOpaque optimizerOptions;
+ optimizerOptions.SetDebugEnabled(true);
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
optimizerOptions);