author:    Sadik Armagan <sadik.armagan@arm.com>    2020-09-10 13:37:32 +0100
committer: Ryan O'Shea <ryan.oshea2@arm.com>        2020-09-10 18:04:17 +0000
commit:    045f6be924240a560293a3a7a0ecae49bcf0d1fa (patch)
tree:      3193fb35288ad8011cdfb9082d82085f48b6792b /include
parent:    08f4016b8ae8ee836fc813abcbc7db826924f3ec (diff)
download:  armnn-045f6be924240a560293a3a7a0ecae49bcf0d1fa.tar.gz
IVGCVSW-5156 Introduce ModelOptions to OptimizedNetwork
* Introduced ModelOptions to IBackendInternal
* Introduced ModelOptions to Network
* Added FastMathEnabled parameter to Conv2d Validate function in CL and NEON
* Added Optimizer tests

Signed-off-by: Ryan OShea <Ryan.OShea2@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ib54c1e82cb3d89a52756ed499cf91b6a7fdb2063
Diffstat (limited to 'include')
-rw-r--r--  include/armnn/BackendOptions.hpp              | 19
-rw-r--r--  include/armnn/INetwork.hpp                    | 11
-rw-r--r--  include/armnn/backends/IBackendContext.hpp    |  7
-rw-r--r--  include/armnn/backends/IBackendInternal.hpp   | 13
4 files changed, 48 insertions, 2 deletions
diff --git a/include/armnn/BackendOptions.hpp b/include/armnn/BackendOptions.hpp
index 44438b2f7c..4aee070866 100644
--- a/include/armnn/BackendOptions.hpp
+++ b/include/armnn/BackendOptions.hpp
@@ -14,6 +14,8 @@ namespace armnn
struct BackendOptions;
using NetworkOptions = std::vector<BackendOptions>;
+using ModelOptions = std::vector<BackendOptions>;
+
/// Struct for the users to pass backend specific options
struct BackendOptions
{
@@ -262,4 +264,21 @@ private:
std::vector<BackendOption> m_Options;
};
+
+template <typename F>
+void ParseOptions(const std::vector<BackendOptions>& options, BackendId backend, F f)
+{
+ for (auto optionsGroup : options)
+ {
+ if (optionsGroup.GetBackendId() == backend)
+ {
+ for (size_t i=0; i < optionsGroup.GetOptionCount(); i++)
+ {
+ const BackendOptions::BackendOption option = optionsGroup.GetOption(i);
+ f(option.GetName(), option.GetValue());
+ }
+ }
+ }
+}
+
} //namespace armnn
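
For context, a minimal sketch of how a backend might consume these options through the new ParseOptions helper. The "GpuAcc" backend id, the "FastMathEnabled" option name (taken from the commit description) and the Var accessors IsBool()/AsBool() are used here as illustrative assumptions, not as part of this patch.

#include <armnn/BackendOptions.hpp>

#include <iostream>
#include <string>

int main()
{
    // One options group, keyed by backend id, holding a single boolean option.
    armnn::BackendOptions gpuAccOptions("GpuAcc", {{"FastMathEnabled", true}});
    armnn::ModelOptions modelOptions{gpuAccOptions};

    // ParseOptions invokes the callback once per option registered for the given backend.
    bool fastMathEnabled = false;
    armnn::ParseOptions(modelOptions, "GpuAcc",
                        [&](std::string name, const armnn::BackendOptions::Var& value)
    {
        if (name == "FastMathEnabled" && value.IsBool())
        {
            fastMathEnabled = value.AsBool();
        }
    });

    std::cout << "FastMathEnabled: " << fastMathEnabled << std::endl;
    return 0;
}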
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1d4939e03d..70ad94fa51 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -614,14 +614,17 @@ struct OptimizerOptions
, m_ReduceFp32ToBf16(false)
, m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
, m_ImportEnabled(false)
+ , m_ModelOptions()
{}
- OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled)
+ OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
+ ModelOptions modelOptions = {})
: m_ReduceFp32ToFp16(reduceFp32ToFp16)
, m_Debug(debug)
, m_ReduceFp32ToBf16(reduceFp32ToBf16)
, m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
, m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
{
if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
{
@@ -631,12 +634,13 @@ struct OptimizerOptions
OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
- bool importEnabled = false)
+ bool importEnabled = false, ModelOptions modelOptions = {})
: m_ReduceFp32ToFp16(reduceFp32ToFp16)
, m_Debug(debug)
, m_ReduceFp32ToBf16(reduceFp32ToBf16)
, m_shapeInferenceMethod(shapeInferenceMethod)
, m_ImportEnabled(importEnabled)
+ , m_ModelOptions(modelOptions)
{
if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
{
@@ -658,6 +662,9 @@ struct OptimizerOptions
// Enable Import
bool m_ImportEnabled;
+
+ // Enable Model Options
+ ModelOptions m_ModelOptions;
};
/// Create an optimized version of the network
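
A minimal sketch of how the extended OptimizerOptions could carry ModelOptions into network optimization, assuming the existing armnn::Optimize overload that accepts OptimizerOptions (not shown in this patch); the backend id and option name are again illustrative.

#include <armnn/ArmNN.hpp>

armnn::IOptimizedNetworkPtr OptimizeWithFastMath(const armnn::INetwork& network,
                                                 armnn::IRuntime& runtime)
{
    armnn::BackendOptions gpuAccOptions("GpuAcc", {{"FastMathEnabled", true}});
    armnn::ModelOptions modelOptions{gpuAccOptions};

    // Uses the four-bool constructor overload with the new trailing modelOptions argument.
    armnn::OptimizerOptions optimizerOptions(/*reduceFp32ToFp16=*/false,
                                             /*debug=*/false,
                                             /*reduceFp32ToBf16=*/false,
                                             /*importEnabled=*/false,
                                             modelOptions);

    return armnn::Optimize(network,
                           {armnn::Compute::GpuAcc},
                           runtime.GetDeviceSpec(),
                           optimizerOptions);
}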
diff --git a/include/armnn/backends/IBackendContext.hpp b/include/armnn/backends/IBackendContext.hpp
index b12c99f733..ae85b6354b 100644
--- a/include/armnn/backends/IBackendContext.hpp
+++ b/include/armnn/backends/IBackendContext.hpp
@@ -4,6 +4,7 @@
//
#pragma once
+#include <armnn/BackendOptions.hpp>
#include <armnn/IRuntime.hpp>
#include <memory>
@@ -29,4 +30,10 @@ public:
using IBackendContextUniquePtr = std::unique_ptr<IBackendContext>;
+class IBackendModelContext
+{
+public:
+ virtual ~IBackendModelContext() {}
+};
+
} // namespace armnn \ No newline at end of file
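
A backend can now attach per-model state by deriving from the new IBackendModelContext. The class below is a hypothetical example (backend id "ExampleBackend", option name "FastMathEnabled") that parses and caches an option when the context is created; it is not code from this patch.

#include <armnn/BackendOptions.hpp>
#include <armnn/backends/IBackendContext.hpp>

#include <string>

// Hypothetical model-level context for an imaginary backend.
class ExampleBackendModelContext : public armnn::IBackendModelContext
{
public:
    explicit ExampleBackendModelContext(const armnn::ModelOptions& modelOptions)
    {
        armnn::ParseOptions(modelOptions, "ExampleBackend",
                            [&](std::string name, const armnn::BackendOptions::Var& value)
        {
            if (name == "FastMathEnabled" && value.IsBool())
            {
                m_FastMathEnabled = value.AsBool();
            }
        });
    }

    bool IsFastMathEnabled() const { return m_FastMathEnabled; }

private:
    bool m_FastMathEnabled = false;
};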
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 6771e7b9f5..ee9cb49562 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -86,6 +86,8 @@ public:
using Optimizations = std::vector<OptimizationPtr>;
using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
+ using IBackendSpecificModelContextPtr = std::shared_ptr<IBackendModelContext>;
+
using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>;
@@ -125,12 +127,23 @@ public:
/// The default implementation always returns a default-constructed pointer.
virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const;
+ virtual IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions& modelOptions) const;
+
/// Create context specifically used for profiling interaction from backends.
virtual IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
IBackendProfilingPtr& backendProfiling);
virtual ILayerSupportSharedPtr GetLayerSupport() const = 0;
+ virtual ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const
+ {
+ if (modelOptions.empty())
+ {
+ return GetLayerSupport();
+ }
+ return GetLayerSupport(modelOptions);
+ }
+
virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const;
bool SupportsTensorAllocatorAPI() const;
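
To wire the new virtuals together, a backend would typically override CreateBackendSpecificModelContext and the ModelOptions overload of GetLayerSupport. The sketch below is an assumed pattern building on the hypothetical ExampleBackendModelContext above; ExampleLayerSupport stands in for a backend's ILayerSupport implementation and is not defined here, and none of this is taken from the CL or NEON backends.

#include <armnn/backends/IBackendInternal.hpp>

#include <memory>

class ExampleBackend : public armnn::IBackendInternal
{
public:
    IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(
        const armnn::ModelOptions& modelOptions) const override
    {
        // Parse and cache per-model options (e.g. FastMathEnabled) once per network.
        return std::make_shared<ExampleBackendModelContext>(modelOptions);
    }

    ILayerSupportSharedPtr GetLayerSupport(const armnn::ModelOptions& modelOptions) const override
    {
        // Hand the parsed options to the layer-support object so that per-model flags
        // (such as fast math for Convolution2d) can influence validation.
        return std::make_shared<ExampleLayerSupport>(
            CreateBackendSpecificModelContext(modelOptions));
    }

    // GetId(), CreateWorkloadFactory(), the parameterless GetLayerSupport() and the
    // remaining IBackendInternal members would be implemented as in an ordinary backend.
};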