about summary refs log tree commit diff
path: root/include
diff options
context:
space:
mode:
author    Matteo Martincigh <matteo.martincigh@arm.com>  2019-11-28 15:45:42 +0000
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2019-12-09 12:00:04 +0000
commit    e5b8eb9fe8147a0849db08ef0898a0e8bef920b4 (patch)
tree      54f039796753b6a395eb9f76e46e11a9413dabad /include
parent    3e2969d7195d77796774101580b837681505904a (diff)
download  armnn-e5b8eb9fe8147a0849db08ef0898a0e8bef920b4.tar.gz
IVGCVSW-4210 Create a public API for the common backend files
* Create a public API for the common backend files
* Move OutputHandler to armnn internal
* Remove unused headers

Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I3e86d908b021e3561befa9d45158d87d2cbb18c0
Diffstat (limited to 'include')
-rw-r--r--include/armnn/backends/CMakeLists.txt17
-rw-r--r--include/armnn/backends/CpuTensorHandleFwd.hpp17
-rw-r--r--include/armnn/backends/DynamicBackend.hpp54
-rw-r--r--include/armnn/backends/IBackendContext.hpp32
-rw-r--r--include/armnn/backends/IBackendInternal.hpp138
-rw-r--r--include/armnn/backends/IMemoryManager.hpp26
-rw-r--r--include/armnn/backends/ITensorHandle.hpp77
-rw-r--r--include/armnn/backends/ITensorHandleFactory.hpp72
-rw-r--r--include/armnn/backends/OptimizationViews.hpp70
9 files changed, 503 insertions, 0 deletions
diff --git a/include/armnn/backends/CMakeLists.txt b/include/armnn/backends/CMakeLists.txt
new file mode 100644
index 0000000000..258ea8b9f9
--- /dev/null
+++ b/include/armnn/backends/CMakeLists.txt
@@ -0,0 +1,17 @@
+#
+# Copyright © 2019 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnBackendsAPI_sources
+ CpuTensorHandleFwd.hpp
+ DynamicBackend.hpp
+ IBackendInternal.hpp
+ IBackendContext.hpp
+ ITensorHandleFactory.hpp
+ IMemoryManager.hpp
+ ITensorHandle.hpp
+ OptimizationViews.hpp
+)
+
+add_library(armnnBackendsAPI OBJECT ${armnnBackendsAPI_sources})
diff --git a/include/armnn/backends/CpuTensorHandleFwd.hpp b/include/armnn/backends/CpuTensorHandleFwd.hpp
new file mode 100644
index 0000000000..9d5547b9e5
--- /dev/null
+++ b/include/armnn/backends/CpuTensorHandleFwd.hpp
@@ -0,0 +1,17 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+namespace armnn
+{
+
+class ConstCpuTensorHandle;
+class CpuTensorHandle;
+class ScopedCpuTensorHandle;
+class PassthroughCpuTensorHandle;
+class ConstPassthroughCpuTensorHandle;
+
+} // namespace armnn
diff --git a/include/armnn/backends/DynamicBackend.hpp b/include/armnn/backends/DynamicBackend.hpp
new file mode 100644
index 0000000000..f888b1e27e
--- /dev/null
+++ b/include/armnn/backends/DynamicBackend.hpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "IBackendInternal.hpp"
+
+#include <armnn/BackendRegistry.hpp>
+
+#include <functional>
+#include <memory>
+
+namespace armnn
+{
+
+class DynamicBackend final
+{
+public:
+ using HandleCloser = std::function<void(const void*)>;
+ using HandlePtr = std::unique_ptr<void, HandleCloser>;
+
+ explicit DynamicBackend(const void* sharedObjectHandle);
+
+ /// Public dynamic backend functions
+ BackendId GetBackendId();
+ BackendVersion GetBackendVersion();
+ IBackendInternalUniquePtr GetBackend();
+ BackendRegistry::FactoryFunction GetFactoryFunction();
+
+private:
+ /// Private utility functions
+ template<typename BackendFunctionType>
+ BackendFunctionType SetFunctionPointer(const std::string& backendFunctionName);
+ IBackendInternalUniquePtr CreateBackend();
+
+ /// Backend function pointer types
+ using IdFunctionType = const char*(*)();
+ using VersionFunctionType = void(*)(uint32_t*, uint32_t*);
+ using FactoryFunctionType = void*(*)();
+
+ /// Backend function pointers
+ IdFunctionType m_BackendIdFunction;
+ VersionFunctionType m_BackendVersionFunction;
+ FactoryFunctionType m_BackendFactoryFunction;
+
+ /// Shared object handle
+ HandlePtr m_Handle;
+};
+
+using DynamicBackendPtr = std::unique_ptr<DynamicBackend>;
+
+} // namespace armnn
diff --git a/include/armnn/backends/IBackendContext.hpp b/include/armnn/backends/IBackendContext.hpp
new file mode 100644
index 0000000000..de9824956f
--- /dev/null
+++ b/include/armnn/backends/IBackendContext.hpp
@@ -0,0 +1,32 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/IRuntime.hpp>
+#include <memory>
+
+namespace armnn
+{
+
+class IBackendContext
+{
+protected:
+ IBackendContext(const IRuntime::CreationOptions&) {}
+
+public:
+ // Before and after Load network events
+ virtual bool BeforeLoadNetwork(NetworkId networkId) = 0;
+ virtual bool AfterLoadNetwork(NetworkId networkId) = 0;
+
+ // Before and after Unload network events
+ virtual bool BeforeUnloadNetwork(NetworkId networkId) = 0;
+ virtual bool AfterUnloadNetwork(NetworkId networkId) = 0;
+
+ virtual ~IBackendContext() {}
+};
+
+using IBackendContextUniquePtr = std::unique_ptr<IBackendContext>;
+
+} // namespace armnn \ No newline at end of file
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
new file mode 100644
index 0000000000..3296d81b7c
--- /dev/null
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -0,0 +1,138 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Types.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/Deprecated.hpp>
+
+#include <ISubgraphViewConverter.hpp>
+#include <SubgraphView.hpp>
+#include <optimizations/Optimization.hpp>
+
+#include "IBackendContext.hpp"
+#include "IMemoryManager.hpp"
+#include "ITensorHandleFactory.hpp"
+#include "OptimizationViews.hpp"
+
+#include <vector>
+#include <memory>
+
+namespace armnn
+{
+class IWorkloadFactory;
+class IMemoryManager;
+class ILayerSupport;
+
+struct BackendVersion
+{
+ uint32_t m_Major;
+ uint32_t m_Minor;
+
+ constexpr BackendVersion()
+ : m_Major(0)
+ , m_Minor(0)
+ {}
+ constexpr BackendVersion(uint32_t major, uint32_t minor)
+ : m_Major(major)
+ , m_Minor(minor)
+ {}
+
+ bool operator==(const BackendVersion& other) const
+ {
+ return this == &other ||
+ (this->m_Major == other.m_Major &&
+ this->m_Minor == other.m_Minor);
+ }
+
+ bool operator<=(const BackendVersion& other) const
+ {
+ return this->m_Major < other.m_Major ||
+ (this->m_Major == other.m_Major &&
+ this->m_Minor <= other.m_Minor);
+ }
+};
+
+inline std::ostream& operator<<(std::ostream& os, const BackendVersion& backendVersion)
+{
+ os << "[" << backendVersion.m_Major << "." << backendVersion.m_Minor << "]";
+
+ return os;
+}
+
+class IBackendInternal : public IBackend
+{
+protected:
+ // Creation must be done through a specific
+ // backend interface.
+ IBackendInternal() = default;
+
+public:
+ // Allow backends created by the factory function
+ // to be destroyed through IBackendInternal.
+ ~IBackendInternal() override = default;
+
+ using IWorkloadFactoryPtr = std::unique_ptr<IWorkloadFactory>;
+ using IBackendContextPtr = std::unique_ptr<IBackendContext>;
+ using OptimizationPtr = std::unique_ptr<Optimization>;
+ using Optimizations = std::vector<OptimizationPtr>;
+ using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
+
+ using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
+ using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>;
+
+ using GraphUniquePtr = std::unique_ptr<Graph>;
+ using SubgraphViewUniquePtr = std::unique_ptr<SubgraphView>;
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ using ISubGraphConverterPtr ARMNN_DEPRECATED_MSG("This type is no longer supported")
+ = std::unique_ptr<ISubGraphConverter>;
+ using SubGraphUniquePtr ARMNN_DEPRECATED_MSG("SubGraph is deprecated, use SubgraphView instead")
+ = std::unique_ptr<SubGraph>;
+
+ ARMNN_DEPRECATED_MSG("This method is no longer supported")
+ virtual ISubGraphConverterPtr CreateSubGraphConverter(const std::shared_ptr<SubGraph>& subGraph) const;
+
+ ARMNN_DEPRECATED_MSG("Use \"OptimizationViews OptimizeSubgraphView(const SubgraphView&)\" instead")
+ virtual Optimizations GetOptimizations() const;
+
+ ARMNN_DEPRECATED_MSG("Use \"OptimizationViews OptimizeSubgraphView(const SubgraphView&)\" instead")
+ virtual SubGraphUniquePtr OptimizeSubGraph(const SubGraph& subGraph, bool& optimizationAttempted) const;
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ virtual IMemoryManagerUniquePtr CreateMemoryManager() const;
+
+ virtual IWorkloadFactoryPtr CreateWorkloadFactory(
+ const IMemoryManagerSharedPtr& memoryManager = nullptr) const = 0;
+
+ virtual IWorkloadFactoryPtr CreateWorkloadFactory(
+ class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const;
+
+ virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const;
+
+ virtual ILayerSupportSharedPtr GetLayerSupport() const = 0;
+
+ virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const;
+
+ bool SupportsTensorAllocatorAPI() const;
+
+ ITensorHandleFactory::FactoryId GetBackwardCompatibleFavoriteHandleFactory();
+
+ /// (Optional) Returns a vector of supported TensorHandleFactory ids in preference order.
+ virtual std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const;
+
+ /// (Optional) Register TensorHandleFactories
+ /// Either this method or CreateMemoryManager() and
+ /// IWorkloadFactory::CreateTensor()/IWorkloadFactory::CreateSubtensor() methods must be implemented.
+ virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry) {}
+
+ /// Returns the version of the Backend API
+ static constexpr BackendVersion GetApiVersion() { return BackendVersion(1, 0); }
+};
+
+using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
+
+} // namespace armnn
diff --git a/include/armnn/backends/IMemoryManager.hpp b/include/armnn/backends/IMemoryManager.hpp
new file mode 100644
index 0000000000..28b81e79ef
--- /dev/null
+++ b/include/armnn/backends/IMemoryManager.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <memory>
+
+namespace armnn
+{
+
+class IMemoryManager
+{
+protected:
+ IMemoryManager() {}
+
+public:
+ virtual void Acquire() = 0;
+ virtual void Release() = 0;
+
+ virtual ~IMemoryManager() {}
+};
+
+using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
+
+} // namespace armnn \ No newline at end of file
diff --git a/include/armnn/backends/ITensorHandle.hpp b/include/armnn/backends/ITensorHandle.hpp
new file mode 100644
index 0000000000..e1b80b874a
--- /dev/null
+++ b/include/armnn/backends/ITensorHandle.hpp
@@ -0,0 +1,77 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/MemorySources.hpp>
+
+namespace armnn
+{
+
+class TensorShape;
+
+class ITensorHandle
+{
+public:
+ virtual ~ITensorHandle(){}
+
+ /// Indicate to the memory manager that this resource is active.
+ /// This is used to compute overlapping lifetimes of resources.
+ virtual void Manage() = 0;
+
+ /// Indicate to the memory manager that this resource is no longer active.
+ /// This is used to compute overlapping lifetimes of resources.
+ virtual void Allocate() = 0;
+
+ /// Get the parent tensor if this is a subtensor.
+ /// \return a pointer to the parent tensor. Otherwise nullptr if not a subtensor.
+ virtual ITensorHandle* GetParent() const = 0;
+
+ /// Map the tensor data for access.
+ /// \param blocking hint to block the calling thread until all other accesses are complete. (backend dependent)
+ /// \return pointer to the first element of the mapped data.
+ virtual const void* Map(bool blocking=true) const = 0;
+
+ /// Unmap the tensor data
+ virtual void Unmap() const = 0;
+
+ /// Map the tensor data for access. Must be paired with call to Unmap().
+ /// \param blocking hint to block the calling thread until all other accesses are complete. (backend dependent)
+ /// \return pointer to the first element of the mapped data.
+ void* Map(bool blocking=true)
+ {
+ return const_cast<void*>(static_cast<const ITensorHandle*>(this)->Map(blocking));
+ }
+
+ /// Unmap the tensor data that was previously mapped with call to Map().
+ void Unmap()
+ {
+ return static_cast<const ITensorHandle*>(this)->Unmap();
+ }
+
+ /// Get the strides for each dimension ordered from largest to smallest where
+ /// the smallest value is the same as the size of a single element in the tensor.
+ /// \return a TensorShape filled with the strides for each dimension
+ virtual TensorShape GetStrides() const = 0;
+
+ /// Get the number of elements for each dimension ordered from slowest iterating dimension
+ /// to fastest iterating dimension.
+ /// \return a TensorShape filled with the number of elements for each dimension.
+ virtual TensorShape GetShape() const = 0;
+
+ // Testing support to be able to verify and set tensor data content
+ virtual void CopyOutTo(void* memory) const = 0;
+ virtual void CopyInFrom(const void* memory) = 0;
+
+ /// Get flags describing supported import sources.
+ virtual unsigned int GetImportFlags() const { return 0; }
+
+ /// Import externally allocated memory
+ /// \param memory base address of the memory being imported.
+ /// \param source source of the allocation for the memory being imported.
+ /// \return true on success or false on failure
+ virtual bool Import(void* memory, MemorySource source) { return false; };
+};
+
+}
diff --git a/include/armnn/backends/ITensorHandleFactory.hpp b/include/armnn/backends/ITensorHandleFactory.hpp
new file mode 100644
index 0000000000..2e4742301b
--- /dev/null
+++ b/include/armnn/backends/ITensorHandleFactory.hpp
@@ -0,0 +1,72 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/IRuntime.hpp>
+#include <armnn/MemorySources.hpp>
+#include <armnn/Types.hpp>
+#include "ITensorHandle.hpp"
+
+#include <boost/core/ignore_unused.hpp>
+
+namespace armnn
+{
+
+class ITensorHandleFactory
+{
+public:
+ using FactoryId = std::string;
+ static const FactoryId LegacyFactoryId; // Use the workload factory to create the tensor handle
+ static const FactoryId DeferredFactoryId; // Some TensorHandleFactory decisions are deferred to run-time
+
+ virtual ~ITensorHandleFactory() {}
+
+ virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
+ TensorShape const& subTensorShape,
+ unsigned int const* subTensorOrigin) const = 0;
+
+ virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;
+
+ virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout) const = 0;
+
+ // Utility Functions for backends which require TensorHandles to have unmanaged memory.
+ // These should be overloaded if required to facilitate direct import of input tensors
+ // and direct export of output tensors.
+ virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool IsMemoryManaged) const
+ {
+ boost::ignore_unused(IsMemoryManaged);
+ return CreateTensorHandle(tensorInfo);
+ }
+
+ virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ DataLayout dataLayout,
+ const bool IsMemoryManaged) const
+ {
+ boost::ignore_unused(IsMemoryManaged);
+ return CreateTensorHandle(tensorInfo, dataLayout);
+ }
+
+ virtual const FactoryId& GetId() const = 0;
+
+ virtual bool SupportsSubTensors() const = 0;
+
+ virtual bool SupportsMapUnmap() const final { return true; }
+
+ virtual MemorySourceFlags GetExportFlags() const { return 0; }
+ virtual MemorySourceFlags GetImportFlags() const { return 0; }
+};
+
+enum class EdgeStrategy
+{
+ Undefined, /// No strategy has been defined. Used internally to verify integrity of optimizations.
+ DirectCompatibility, /// Destination backend can work directly with tensors on source backend.
+ ExportToTarget, /// Source backends tensor data can be exported to destination backend tensor without copy.
+ CopyToTarget /// Copy contents from source backend tensor to destination backend tensor.
+};
+
+} //namespace armnn
diff --git a/include/armnn/backends/OptimizationViews.hpp b/include/armnn/backends/OptimizationViews.hpp
new file mode 100644
index 0000000000..c357c0c8bb
--- /dev/null
+++ b/include/armnn/backends/OptimizationViews.hpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <SubgraphView.hpp>
+
+namespace armnn
+{
+
+class OptimizationViews
+{
+public:
+ OptimizationViews() = default;
+ OptimizationViews(const OptimizationViews&) = delete;
+ OptimizationViews& operator=(const OptimizationViews&) = delete;
+ OptimizationViews(OptimizationViews&&) = default;
+ OptimizationViews& operator=(OptimizationViews&&) = default;
+
+ struct SubstitutionPair
+ {
+ /// Subgraph of Layers from the original graph which should be replaced
+ SubgraphView m_SubstitutableSubgraph;
+
+ /// A subgraph of new layers which will replace layers in m_SubstitutableSubgraph
+ SubgraphView m_ReplacementSubgraph;
+ };
+
+ using Subgraphs = std::vector<SubgraphView>;
+ using Substitutions = std::vector<SubstitutionPair>;
+
+ void AddSubstitution(SubstitutionPair&& substitution)
+ {
+ m_SuccesfulOptimizations.emplace_back(substitution);
+ }
+
+ void AddFailedSubgraph(SubgraphView&& subgraph)
+ {
+ m_FailedOptimizations.emplace_back(subgraph);
+ }
+
+ void AddUntouchedSubgraph(SubgraphView&& subgraph)
+ {
+ m_UntouchedSubgraphs.emplace_back(subgraph);
+ }
+
+ const Substitutions& GetSubstitutions() const { return m_SuccesfulOptimizations; }
+ const Subgraphs& GetFailedSubgraphs() const { return m_FailedOptimizations; }
+ const Subgraphs& GetUntouchedSubgraphs() const { return m_UntouchedSubgraphs; }
+
+ Substitutions& GetSubstitutions() { return m_SuccesfulOptimizations; }
+ Subgraphs& GetFailedSubgraphs() { return m_FailedOptimizations; }
+ Subgraphs& GetUntouchedSubgraphs() { return m_UntouchedSubgraphs; }
+
+ bool Validate(const SubgraphView& originalSubgraph) const;
+
+ Graph& GetGraph() { return m_Graph; }
+
+private:
+ Substitutions m_SuccesfulOptimizations; ///< Proposed substitutions from successful optimizations
+ Subgraphs m_FailedOptimizations; ///< Subgraphs from the original subgraph which cannot be supported
+ Subgraphs m_UntouchedSubgraphs; ///< Subgraphs from the original subgraph which remain unmodified
+
+ /// Graph object used only as a container for any layer generated by the optimization process
+ Graph m_Graph;
+};
+
+} //namespace armnn