From 045f6be924240a560293a3a7a0ecae49bcf0d1fa Mon Sep 17 00:00:00 2001 From: Sadik Armagan Date: Thu, 10 Sep 2020 13:37:32 +0100 Subject: IVGCVSW-5156 Introduce ModelOptions to OptimizedNetwork * Introduced ModelOptions to IBackendInternal * Introduced ModelOptions to Network * Added FastMathEnabled parameter to Conv2d Validate function in CL and NEON * Added Optimizer tests Signed-off-by: Ryan OShea Signed-off-by: Sadik Armagan Change-Id: Ib54c1e82cb3d89a52756ed499cf91b6a7fdb2063 --- include/armnn/BackendOptions.hpp | 19 +++++++++++++++++++ include/armnn/INetwork.hpp | 11 +++++++++-- include/armnn/backends/IBackendContext.hpp | 7 +++++++ include/armnn/backends/IBackendInternal.hpp | 13 +++++++++++++ 4 files changed, 48 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/armnn/BackendOptions.hpp b/include/armnn/BackendOptions.hpp index 44438b2f7c..4aee070866 100644 --- a/include/armnn/BackendOptions.hpp +++ b/include/armnn/BackendOptions.hpp @@ -14,6 +14,8 @@ namespace armnn struct BackendOptions; using NetworkOptions = std::vector<BackendOptions>; +using ModelOptions = std::vector<BackendOptions>; + /// Struct for the users to pass backend specific options struct BackendOptions { @@ -262,4 +264,21 @@ private: std::vector<BackendOption> m_Options; }; + +template <typename F> +void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f) +{ + for (auto optionsGroup : options) + { + if (optionsGroup.GetBackendId() == backend) + { + for (size_t i=0; i < optionsGroup.GetOptionCount(); i++) + { + const BackendOptions::BackendOption option = optionsGroup.GetOption(i); + f(option.GetName(), option.GetValue()); + } + } + } +} + } //namespace armnn diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index 1d4939e03d..70ad94fa51 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -614,14 +614,17 @@ struct OptimizerOptions , m_ReduceFp32ToBf16(false) , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly) , m_ImportEnabled(false) + , m_ModelOptions() {} 
- OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled) + OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled, + ModelOptions modelOptions = {}) : m_ReduceFp32ToFp16(reduceFp32ToFp16) , m_Debug(debug) , m_ReduceFp32ToBf16(reduceFp32ToBf16) , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly) , m_ImportEnabled(importEnabled) + , m_ModelOptions(modelOptions) { if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16) { @@ -631,12 +634,13 @@ struct OptimizerOptions OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false, ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly, - bool importEnabled = false) + bool importEnabled = false, ModelOptions modelOptions = {}) : m_ReduceFp32ToFp16(reduceFp32ToFp16) , m_Debug(debug) , m_ReduceFp32ToBf16(reduceFp32ToBf16) , m_shapeInferenceMethod(shapeInferenceMethod) , m_ImportEnabled(importEnabled) + , m_ModelOptions(modelOptions) { if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16) { @@ -658,6 +662,9 @@ struct OptimizerOptions // Enable Import bool m_ImportEnabled; + + // Enable Model Options + ModelOptions m_ModelOptions; }; /// Create an optimized version of the network diff --git a/include/armnn/backends/IBackendContext.hpp b/include/armnn/backends/IBackendContext.hpp index b12c99f733..ae85b6354b 100644 --- a/include/armnn/backends/IBackendContext.hpp +++ b/include/armnn/backends/IBackendContext.hpp @@ -4,6 +4,7 @@ // #pragma once +#include <armnn/BackendOptions.hpp> #include <armnn/IRuntime.hpp> #include <memory> @@ -29,4 +30,10 @@ public: using IBackendContextUniquePtr = std::unique_ptr<IBackendContext>; +class IBackendModelContext +{ +public: + virtual ~IBackendModelContext() {} +}; + } // namespace armnn \ No newline at end of file diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp index 6771e7b9f5..ee9cb49562 100644 --- a/include/armnn/backends/IBackendInternal.hpp +++ 
b/include/armnn/backends/IBackendInternal.hpp @@ -86,6 +86,8 @@ public: using Optimizations = std::vector<OptimizationPtr>; using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>; + using IBackendSpecificModelContextPtr = std::shared_ptr<IBackendModelContext>; + using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>; using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>; @@ -125,12 +127,23 @@ public: /// The default implementation always returns a default-constructed pointer. virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const; + virtual IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions& modelOptions) const; + /// Create context specifically used for profiling interaction from backends. virtual IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions, IBackendProfilingPtr& backendProfiling); virtual ILayerSupportSharedPtr GetLayerSupport() const = 0; + virtual ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const + { + if (modelOptions.empty()) + { + return GetLayerSupport(); + } + return GetLayerSupport(modelOptions); + } + virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const; bool SupportsTensorAllocatorAPI() const; -- cgit v1.2.1