author     John Mcloughlin <john.mcloughlin@arm.com>    2023-03-24 12:07:25 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>    2023-04-12 18:28:23 +0100
commit     c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 (patch)
tree       931f1403589c34fd2de6b94d95e9e172a92424fe /include
parent     ca5c82af9269e7fd7ed17c7df9780a75fdaa733e (diff)
IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions
Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
Diffstat (limited to 'include')
-rw-r--r--  include/armnn/INetwork.hpp  164
1 file changed, 127 insertions(+), 37 deletions(-)
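
Background on the change: the commit deprecates the publicly laid-out OptimizerOptions struct in favour of OptimizerOptionsOpaque, which hides all data members behind a forward-declared OptimizerOptionsOpaqueImpl held through a std::unique_ptr (the Pimpl idiom), so the options layout can evolve without breaking ABI. A minimal, self-contained sketch of the idiom follows; the Options/OptionsImpl names are illustrative placeholders, not the actual ArmNN implementation, which lives in the corresponding .cpp file.

// pimpl_sketch.cpp -- minimal sketch of the Pimpl idiom applied here.
#include <memory>
#include <utility>

// Header side: only a forward declaration of the implementation is visible,
// so data members can change without affecting this class's ABI.
struct OptionsImpl;

class Options
{
public:
    Options();
    Options(const Options& other);
    ~Options();                        // defined where OptionsImpl is complete
    Options& operator=(Options other); // by-value, copy-and-swap

    void SetDebugEnabled(bool state);
    bool GetDebugEnabled() const;

private:
    std::unique_ptr<OptionsImpl> m_Impl;
};

// Source side: the implementation struct holds the real state.
struct OptionsImpl
{
    bool m_Debug = false;
};

Options::Options() : m_Impl(std::make_unique<OptionsImpl>()) {}
Options::Options(const Options& other)
    : m_Impl(std::make_unique<OptionsImpl>(*other.m_Impl)) {}
Options::~Options() = default;
Options& Options::operator=(Options other)
{
    std::swap(m_Impl, other.m_Impl);
    return *this;
}

void Options::SetDebugEnabled(bool state) { m_Impl->m_Debug = state; }
bool Options::GetDebugEnabled() const     { return m_Impl->m_Debug; }

Note the by-value operator=, which matches the copy-and-swap-friendly signature that OptimizerOptionsOpaque declares in the diff below.
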
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 273753752d..819f5cb1a3 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -136,53 +136,53 @@ protected:
~IConnectableLayer() {}
};
-
-/// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptions provides
-/// a set of features that allows the user to customize this optimization on a per model basis.
struct OptimizerOptions
{
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
OptimizerOptions()
- : m_ReduceFp32ToFp16(false)
- , m_Debug(false)
- , m_DebugToFile(false)
- , m_ReduceFp32ToBf16(false)
- , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
- , m_ImportEnabled(false)
- , m_ModelOptions()
- , m_ProfilingEnabled(false)
- , m_ExportEnabled(false)
- , m_AllowExpandedDims(false)
+ : m_ReduceFp32ToFp16(false)
+ , m_Debug(false)
+ , m_DebugToFile(false)
+ , m_ReduceFp32ToBf16(false)
+ , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+ , m_ImportEnabled(false)
+ , m_ModelOptions()
+ , m_ProfilingEnabled(false)
+ , m_ExportEnabled(false)
+ , m_AllowExpandedDims(false)
{}
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false)
- : m_ReduceFp32ToFp16(reduceFp32ToFp16)
- , m_Debug(debug)
- , m_DebugToFile(debugToFile)
- , m_ReduceFp32ToBf16(reduceFp32ToBf16)
- , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
- , m_ImportEnabled(importEnabled)
- , m_ModelOptions(modelOptions)
- , m_ProfilingEnabled(false)
- , m_ExportEnabled(exportEnabled)
- , m_AllowExpandedDims(false)
+ : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+ , m_Debug(debug)
+ , m_DebugToFile(debugToFile)
+ , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+ , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+ , m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
+ , m_ProfilingEnabled(false)
+ , m_ExportEnabled(exportEnabled)
+ , m_AllowExpandedDims(false)
{
}
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
bool debugToFile = false, bool allowExpandedDims = false)
- : m_ReduceFp32ToFp16(reduceFp32ToFp16)
- , m_Debug(debug)
- , m_DebugToFile(debugToFile)
- , m_ReduceFp32ToBf16(reduceFp32ToBf16)
- , m_shapeInferenceMethod(shapeInferenceMethod)
- , m_ImportEnabled(importEnabled)
- , m_ModelOptions(modelOptions)
- , m_ProfilingEnabled(false)
- , m_ExportEnabled(exportEnabled)
- , m_AllowExpandedDims(allowExpandedDims)
+ : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+ , m_Debug(debug)
+ , m_DebugToFile(debugToFile)
+ , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+ , m_shapeInferenceMethod(shapeInferenceMethod)
+ , m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
+ , m_ProfilingEnabled(false)
+ , m_ExportEnabled(exportEnabled)
+ , m_AllowExpandedDims(allowExpandedDims)
{
}
@@ -195,7 +195,8 @@ struct OptimizerOptions
stream << "\tDebug: " << m_Debug << "\n";
stream << "\tDebug to file: " << m_DebugToFile << "\n";
stream << "\tShapeInferenceMethod: " <<
- (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
+ (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly
+ ? "ValidateOnly" : "InferAndValidate") << "\n";
stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
@@ -252,6 +253,75 @@ struct OptimizerOptions
bool m_AllowExpandedDims;
};
+/// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptions provides
+/// a set of features that allows the user to customize this optimization on a per model basis.
+struct OptimizerOptionsOpaqueImpl;
+
+class OptimizerOptionsOpaque
+{
+public:
+ OptimizerOptionsOpaque();
+ OptimizerOptionsOpaque(const OptimizerOptionsOpaque& other);
+ ~OptimizerOptionsOpaque();
+
+ OptimizerOptionsOpaque(const OptimizerOptions& OptimizerStruct);
+
+ OptimizerOptionsOpaque& operator=(OptimizerOptionsOpaque other);
+
+ OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
+ ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false);
+
+ OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
+ ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
+ bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
+ bool debugToFile = false, bool allowExpandedDims = false);
+
+ const std::string ToString() const;
+
+ bool GetProfilingEnabled() const;
+
+ bool GetImportEnabled() const;
+
+ bool GetExportEnabled() const;
+
+ bool GetReduceFp32ToFp16() const;
+
+ bool GetReduceFp32ToBf16() const;
+
+ bool GetDebugEnabled() const;
+
+ bool GetDebugToFileEnabled() const;
+
+ bool GetAllowExpandedDims() const;
+
+ armnn::ModelOptions GetModelOptions() const;
+
+ armnn::ShapeInferenceMethod GetShapeInferenceMethod() const;
+
+ void SetImportEnabled(bool ImportState);
+
+ void SetExportEnabled(bool ExportState);
+
+ void SetProfilingEnabled(bool ProfilingState);
+
+ void SetDebugEnabled(bool DebugState);
+
+ void SetDebugToFileEnabled(bool DebugFileState);
+
+ void SetReduceFp32ToFp16(bool ReduceFp32ToFp16State);
+
+ void SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType);
+
+ void AddModelOption(armnn::BackendOptions);
+
+ void SetAllowExpandedDims(bool ExpandedDimsAllowed);
+
+private:
+
+ std::unique_ptr<armnn::OptimizerOptionsOpaqueImpl> p_OptimizerOptionsImpl;
+
+};
+
class IWorkloadFactory;
class NetworkImpl;
using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
@@ -768,6 +838,11 @@ protected:
const IDeviceSpec& deviceSpec,
const OptimizerOptions& options,
Optional<std::vector<std::string>&> messages);
+ friend IOptimizedNetworkPtr Optimize(const INetwork& network,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
+ const OptimizerOptionsOpaque& options,
+ Optional<std::vector<std::string>&> messages);
INetwork(NetworkOptions networkOptions = {});
@@ -819,12 +894,12 @@ protected:
friend IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
const std::vector<BackendId>& backendPreferences,
const IDeviceSpec& deviceSpec,
- const OptimizerOptions& options,
+ const OptimizerOptionsOpaque& options,
Optional<std::vector<std::string>&> messages);
friend IOptimizedNetworkPtr Optimize(const Graph& inGraph,
const std::vector<BackendId>& backendPreferences,
const IDeviceSpec& deviceSpec,
- const OptimizerOptions& options,
+ const OptimizerOptionsOpaque& options,
Optional<std::vector<std::string>&> messages);
IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
@@ -844,7 +919,7 @@ protected:
IOptimizedNetworkPtr Optimize(const INetwork& network,
const std::vector<BackendId>& backendPreferences,
const IDeviceSpec& deviceSpec,
- const OptimizerOptions& options = OptimizerOptions(),
+ const OptimizerOptionsOpaque& options = OptimizerOptionsOpaque(),
Optional<std::vector<std::string>&> messages = EmptyOptional());
/// Create an optimized version of the network
@@ -859,6 +934,21 @@ IOptimizedNetworkPtr Optimize(const INetwork& network,
IOptimizedNetworkPtr Optimize(const Graph& inGraph,
const std::vector<BackendId>& backendPreferences,
const IDeviceSpec& deviceSpec,
+ const OptimizerOptionsOpaque& options,
+ Optional<std::vector<std::string>&> messages = EmptyOptional());
+
+/// Accept legacy OptimizerOptions
+IOptimizedNetworkPtr Optimize(const Graph& inGraph,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
const OptimizerOptions& options,
Optional<std::vector<std::string>&> messages = EmptyOptional());
+
+/// Accept legacy OptimizerOptions
+IOptimizedNetworkPtr Optimize(const INetwork& network,
+ const std::vector<BackendId>& backendPreferences,
+ const IDeviceSpec& deviceSpec,
+ const OptimizerOptions& options,
+ Optional<std::vector<std::string>&> messages = EmptyOptional());
+
} //namespace armnn
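
Usage illustration: with this change applied, callers construct an OptimizerOptionsOpaque, configure it through the setters declared above, and pass it to armnn::Optimize; the OptimizerOptions overloads remain only to accept the deprecated struct. A minimal sketch, assuming the usual ArmNN runtime entry points (INetwork::Create, IRuntime::Create, the CpuRef reference backend) that sit outside this header:

// optimize_sketch.cpp -- sketch of calling the new Optimize overload.
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>
#include <iostream>
#include <vector>

int main()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();
    // ... add input, operator and output layers to the network here;
    //     Optimize will reject an empty network at run time ...

    armnn::OptimizerOptionsOpaque options;  // replaces deprecated OptimizerOptions
    options.SetReduceFp32ToFp16(true);      // setters declared in the new class
    options.SetProfilingEnabled(true);

    armnn::IRuntimePtr runtime =
        armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    std::vector<armnn::BackendId> backends = { "CpuRef" };

    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*network,
                                                         backends,
                                                         runtime->GetDeviceSpec(),
                                                         options);
    std::cout << options.ToString() << std::endl;
    return 0;
}

Existing call sites that still pass an OptimizerOptions continue to compile against the legacy overloads declared at the end of the diff, with a deprecation warning until the 24.02 removal date given in ARMNN_DEPRECATED_MSG_REMOVAL_DATE.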