From c5ee0d7460f1e0ec7e2b0639e3e8962934c4df09 Mon Sep 17 00:00:00 2001
From: John Mcloughlin
Date: Fri, 24 Mar 2023 12:07:25 +0000
Subject: IVGCVSW-7197 Implement Pimpl Idiom for OptimizerOptions

Signed-off-by: John Mcloughlin
Change-Id: Id4bdc31e3e6f18ccaef232c29a2d2825c915b21c
---
 include/armnn/INetwork.hpp | 164 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 127 insertions(+), 37 deletions(-)

(limited to 'include')

diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 273753752d..819f5cb1a3 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -136,53 +136,53 @@ protected:
     ~IConnectableLayer() {}
 };
 
-
-/// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptions provides
-/// a set of features that allows the user to customize this optimization on a per model basis.
 struct OptimizerOptions
 {
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
     OptimizerOptions()
-        : m_ReduceFp32ToFp16(false)
-        , m_Debug(false)
-        , m_DebugToFile(false)
-        , m_ReduceFp32ToBf16(false)
-        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
-        , m_ImportEnabled(false)
-        , m_ModelOptions()
-        , m_ProfilingEnabled(false)
-        , m_ExportEnabled(false)
-        , m_AllowExpandedDims(false)
+            : m_ReduceFp32ToFp16(false)
+            , m_Debug(false)
+            , m_DebugToFile(false)
+            , m_ReduceFp32ToBf16(false)
+            , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+            , m_ImportEnabled(false)
+            , m_ModelOptions()
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(false)
+            , m_AllowExpandedDims(false)
     {}
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
                      ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false)
-        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
-        , m_Debug(debug)
-        , m_DebugToFile(debugToFile)
-        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
-        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
-        , m_ImportEnabled(importEnabled)
-        , m_ModelOptions(modelOptions)
-        , m_ProfilingEnabled(false)
-        , m_ExportEnabled(exportEnabled)
-        , m_AllowExpandedDims(false)
+            : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+            , m_Debug(debug)
+            , m_DebugToFile(debugToFile)
+            , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+            , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+            , m_ImportEnabled(importEnabled)
+            , m_ModelOptions(modelOptions)
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(exportEnabled)
+            , m_AllowExpandedDims(false)
     {
     }
 
+    ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable OptimizerOptionsOpaque instead.", "24.02")
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                      bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
                      bool debugToFile = false, bool allowExpandedDims = false)
-        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
-        , m_Debug(debug)
-        , m_DebugToFile(debugToFile)
-        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
-        , m_shapeInferenceMethod(shapeInferenceMethod)
-        , m_ImportEnabled(importEnabled)
-        , m_ModelOptions(modelOptions)
-        , m_ProfilingEnabled(false)
-        , m_ExportEnabled(exportEnabled)
-        , m_AllowExpandedDims(allowExpandedDims)
+            : m_ReduceFp32ToFp16(reduceFp32ToFp16)
+            , m_Debug(debug)
+            , m_DebugToFile(debugToFile)
+            , m_ReduceFp32ToBf16(reduceFp32ToBf16)
+            , m_shapeInferenceMethod(shapeInferenceMethod)
+            , m_ImportEnabled(importEnabled)
+            , m_ModelOptions(modelOptions)
+            , m_ProfilingEnabled(false)
+            , m_ExportEnabled(exportEnabled)
+            , m_AllowExpandedDims(allowExpandedDims)
     {
     }
 
@@ -195,7 +195,8 @@ struct OptimizerOptions
         stream << "\tDebug: " << m_Debug << "\n";
         stream << "\tDebug to file: " << m_DebugToFile << "\n";
         stream << "\tShapeInferenceMethod: " <<
-        (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
+               (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly
+                ? "ValidateOnly" : "InferAndValidate") << "\n";
         stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
         stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
         stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
@@ -252,6 +253,75 @@ struct OptimizerOptions
     bool m_AllowExpandedDims;
 };
 
+/// ArmNN performs an optimization on each model/network before it gets loaded for execution. OptimizerOptions provides
+/// a set of features that allows the user to customize this optimization on a per model basis.
+struct OptimizerOptionsOpaqueImpl;
+
+class OptimizerOptionsOpaque
+{
+public:
+    OptimizerOptionsOpaque();
+    OptimizerOptionsOpaque(const OptimizerOptionsOpaque& other);
+    ~OptimizerOptionsOpaque();
+
+    OptimizerOptionsOpaque(const OptimizerOptions& OptimizerStruct);
+
+    OptimizerOptionsOpaque& operator=(OptimizerOptionsOpaque other);
+
+    OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
+                           ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false);
+
+    OptimizerOptionsOpaque(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
+                           ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
+                           bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
+                           bool debugToFile = false, bool allowExpandedDims = false);
+
+    const std::string ToString() const;
+
+    bool GetProfilingEnabled() const;
+
+    bool GetImportEnabled() const;
+
+    bool GetExportEnabled() const;
+
+    bool GetReduceFp32ToFp16() const;
+
+    bool GetReduceFp32ToBf16() const;
+
+    bool GetDebugEnabled() const;
+
+    bool GetDebugToFileEnabled() const;
+
+    bool GetAllowExpandedDims() const;
+
+    armnn::ModelOptions GetModelOptions() const;
+
+    armnn::ShapeInferenceMethod GetShapeInferenceMethod() const;
+
+    void SetImportEnabled(bool ImportState);
+
+    void SetExportEnabled(bool ExportState);
+
+    void SetProfilingEnabled(bool ProfilingState);
+
+    void SetDebugEnabled(bool DebugState);
+
+    void SetDebugToFileEnabled(bool DebugFileState);
+
+    void SetReduceFp32ToFp16(bool ReduceFp32ToFp16State);
+
+    void SetShapeInferenceMethod(armnn::ShapeInferenceMethod ShapeInferenceMethodType);
+
+    void AddModelOption(armnn::BackendOptions);
+
+    void SetAllowExpandedDims(bool ExpandedDimsAllowed);
+
+private:
+
+    std::unique_ptr<OptimizerOptionsOpaqueImpl> p_OptimizerOptionsImpl;
+
+};
+
 class IWorkloadFactory;
 class NetworkImpl;
 using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
@@ -768,6 +838,11 @@ protected:
                                          const IDeviceSpec& deviceSpec,
                                          const OptimizerOptions& options,
                                          Optional<std::vector<std::string>&> messages);
+    friend IOptimizedNetworkPtr Optimize(const INetwork& network,
+                                         const std::vector<BackendId>& backendPreferences,
+                                         const IDeviceSpec& deviceSpec,
+                                         const OptimizerOptionsOpaque& options,
+                                         Optional<std::vector<std::string>&> messages);
 
     INetwork(NetworkOptions networkOptions = {});
@@ -819,12 +894,12 @@ protected:
     friend IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                                          const std::vector<BackendId>& backendPreferences,
                                          const IDeviceSpec& deviceSpec,
-                                         const OptimizerOptions& options,
+                                         const OptimizerOptionsOpaque& options,
                                          Optional<std::vector<std::string>&> messages);
     friend IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                                          const std::vector<BackendId>& backendPreferences,
                                          const IDeviceSpec& deviceSpec,
-                                         const OptimizerOptions& options,
+                                         const OptimizerOptionsOpaque& options,
                                          Optional<std::vector<std::string>&> messages);
 
     IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
@@ -844,7 +919,7 @@ protected:
 IOptimizedNetworkPtr Optimize(const INetwork& network,
                               const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
-                              const OptimizerOptions& options = OptimizerOptions(),
+                              const OptimizerOptionsOpaque& options = OptimizerOptionsOpaque(),
                               Optional<std::vector<std::string>&> messages = EmptyOptional());
 
 /// Create an optimized version of the network
@@ -856,9 +931,24 @@ IOptimizedNetworkPtr Optimize(const INetwork& network,
 /// @return An IOptimizedNetworkPtr interface to the optimized network, throws an exception derived from
 /// armnn::Exception if process fails.
+IOptimizedNetworkPtr Optimize(const Graph& inGraph,
+                              const std::vector<BackendId>& backendPreferences,
+                              const IDeviceSpec& deviceSpec,
+                              const OptimizerOptionsOpaque& options,
+                              Optional<std::vector<std::string>&> messages = EmptyOptional());
+
+/// Accept legacy OptimizerOptions
 IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                               const std::vector<BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
                               const OptimizerOptions& options,
                               Optional<std::vector<std::string>&> messages = EmptyOptional());
+
+/// Accept legacy OptimizerOptions
+IOptimizedNetworkPtr Optimize(const INetwork& network,
+                              const std::vector<BackendId>& backendPreferences,
+                              const IDeviceSpec& deviceSpec,
+                              const OptimizerOptions& options,
+                              Optional<std::vector<std::string>&> messages = EmptyOptional());
+
 } //namespace armnn
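
The header above only declares the opaque wrapper: OptimizerOptionsOpaqueImpl is forward-declared and held through a unique_ptr, so its layout can change without breaking the ABI of INetwork.hpp. The out-of-line definitions live in the library's source files, which this 'include'-filtered diff does not show. The following is a minimal sketch of how the Pimpl wiring behind these declarations could look; only the member-function signatures come from the header above, while the impl struct's fields, the file placement, and the copy-and-swap assignment body are assumptions for illustration.

#include <armnn/INetwork.hpp>

#include <memory>
#include <utility>

namespace armnn
{

// Assumed layout: a plain struct mirroring the deprecated OptimizerOptions
// fields. Because callers only ever reach it through a pointer, fields can
// be added or reordered here without breaking the ABI of the header.
struct OptimizerOptionsOpaqueImpl
{
    bool m_ReduceFp32ToFp16 = false;
    bool m_ImportEnabled = false;
    // ... remaining option fields elided ...
};

OptimizerOptionsOpaque::OptimizerOptionsOpaque()
    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>())
{
}

// Deep copy keeps the opaque type value-semantic despite the unique_ptr.
OptimizerOptionsOpaque::OptimizerOptionsOpaque(const OptimizerOptionsOpaque& other)
    : p_OptimizerOptionsImpl(std::make_unique<OptimizerOptionsOpaqueImpl>(*other.p_OptimizerOptionsImpl))
{
}

// Must be defined here, where OptimizerOptionsOpaqueImpl is a complete
// type; otherwise unique_ptr cannot instantiate its deleter.
OptimizerOptionsOpaque::~OptimizerOptionsOpaque() = default;

// The by-value parameter declared in the header enables copy-and-swap.
OptimizerOptionsOpaque& OptimizerOptionsOpaque::operator=(OptimizerOptionsOpaque other)
{
    std::swap(p_OptimizerOptionsImpl, other.p_OptimizerOptionsImpl);
    return *this;
}

// Getters and setters simply forward through the pointer.
bool OptimizerOptionsOpaque::GetImportEnabled() const
{
    return p_OptimizerOptionsImpl->m_ImportEnabled;
}

void OptimizerOptionsOpaque::SetImportEnabled(bool ImportState)
{
    p_OptimizerOptionsImpl->m_ImportEnabled = ImportState;
}

} // namespace armnn

Defining the destructor out of line is what makes the forward declaration legal: the unique_ptr's deleter needs the complete OptimizerOptionsOpaqueImpl type, which only the implementation file ever sees.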
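
For callers, the migration the deprecation messages point to is mechanical: construct the opaque type, drive it through its setters, and pass it to the new Optimize overload declared above. A hypothetical before/after sketch follows; the runtime handle, backend list, and option values are placeholders, not part of this patch.

#include <armnn/ArmNN.hpp>

armnn::IOptimizedNetworkPtr OptimizeNetwork(const armnn::INetwork& network,
                                            armnn::IRuntime& runtime)
{
    // Before (deprecated by this patch, removal scheduled for 24.02):
    // armnn::OptimizerOptions options(/*reduceFp32ToFp16=*/true, /*debug=*/false);

    // After: state is reached only through member functions, so the option
    // set can grow without recompiling client code.
    armnn::OptimizerOptionsOpaque options;
    options.SetReduceFp32ToFp16(true);
    options.SetDebugEnabled(false);

    return armnn::Optimize(network,
                           {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
                           runtime.GetDeviceSpec(),
                           options);
}

Because the overloads marked "Accept legacy OptimizerOptions" are retained, existing callers keep compiling in the meantime; until the 24.02 removal they only gain a deprecation warning.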