diff options
Diffstat (limited to 'include')
-rw-r--r--  include/armnn/BackendOptions.hpp            | 19
-rw-r--r--  include/armnn/INetwork.hpp                  | 11
-rw-r--r--  include/armnn/backends/IBackendContext.hpp  |  7
-rw-r--r--  include/armnn/backends/IBackendInternal.hpp | 13
4 files changed, 48 insertions, 2 deletions
diff --git a/include/armnn/BackendOptions.hpp b/include/armnn/BackendOptions.hpp index 44438b2f7c..4aee070866 100644 --- a/include/armnn/BackendOptions.hpp +++ b/include/armnn/BackendOptions.hpp @@ -14,6 +14,8 @@ namespace armnn struct BackendOptions; using NetworkOptions = std::vector<BackendOptions>; +using ModelOptions = std::vector<BackendOptions>; + /// Struct for the users to pass backend specific options struct BackendOptions { @@ -262,4 +264,21 @@ private: std::vector<BackendOption> m_Options; }; + +template <typename F> +void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f) +{ + for (auto optionsGroup : options) + { + if (optionsGroup.GetBackendId() == backend) + { + for (size_t i=0; i < optionsGroup.GetOptionCount(); i++) + { + const BackendOptions::BackendOption option = optionsGroup.GetOption(i); + f(option.GetName(), option.GetValue()); + } + } + } +} + } //namespace armnn diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index 1d4939e03d..70ad94fa51 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -614,14 +614,17 @@ struct OptimizerOptions , m_ReduceFp32ToBf16(false) , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly) , m_ImportEnabled(false) + , m_ModelOptions() {} - OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled) + OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled, + ModelOptions modelOptions = {}) : m_ReduceFp32ToFp16(reduceFp32ToFp16) , m_Debug(debug) , m_ReduceFp32ToBf16(reduceFp32ToBf16) , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly) , m_ImportEnabled(importEnabled) + , m_ModelOptions(modelOptions) { if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16) { @@ -631,12 +634,13 @@ struct OptimizerOptions OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false, ShapeInferenceMethod shapeInferenceMethod = 
armnn::ShapeInferenceMethod::ValidateOnly, - bool importEnabled = false) + bool importEnabled = false, ModelOptions modelOptions = {}) : m_ReduceFp32ToFp16(reduceFp32ToFp16) , m_Debug(debug) , m_ReduceFp32ToBf16(reduceFp32ToBf16) , m_shapeInferenceMethod(shapeInferenceMethod) , m_ImportEnabled(importEnabled) + , m_ModelOptions(modelOptions) { if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16) { @@ -658,6 +662,9 @@ struct OptimizerOptions // Enable Import bool m_ImportEnabled; + + // Enable Model Options + ModelOptions m_ModelOptions; }; /// Create an optimized version of the network diff --git a/include/armnn/backends/IBackendContext.hpp b/include/armnn/backends/IBackendContext.hpp index b12c99f733..ae85b6354b 100644 --- a/include/armnn/backends/IBackendContext.hpp +++ b/include/armnn/backends/IBackendContext.hpp @@ -4,6 +4,7 @@ // #pragma once +#include <armnn/BackendOptions.hpp> #include <armnn/IRuntime.hpp> #include <memory> @@ -29,4 +30,10 @@ public: using IBackendContextUniquePtr = std::unique_ptr<IBackendContext>; +class IBackendModelContext +{ +public: + virtual ~IBackendModelContext() {} +}; + } // namespace armnn
\ No newline at end of file diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp index 6771e7b9f5..ee9cb49562 100644 --- a/include/armnn/backends/IBackendInternal.hpp +++ b/include/armnn/backends/IBackendInternal.hpp @@ -86,6 +86,8 @@ public: using Optimizations = std::vector<OptimizationPtr>; using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>; + using IBackendSpecificModelContextPtr = std::shared_ptr<IBackendModelContext>; + using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>; using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>; @@ -125,12 +127,23 @@ public: /// The default implementation always returns a default-constructed pointer. virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const; + virtual IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions& modelOptions) const; + /// Create context specifically used for profiling interaction from backends. virtual IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling); virtual ILayerSupportSharedPtr GetLayerSupport() const = 0; + virtual ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const + { + if (modelOptions.empty()) + { + return GetLayerSupport(); + } + return GetLayerSupport(modelOptions); + } + virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const; bool SupportsTensorAllocatorAPI() const; |