diff options
author | Derek Lamberti <derek.lamberti@arm.com> | 2019-06-13 11:40:08 +0100 |
---|---|---|
committer | Derek Lamberti <derek.lamberti@arm.com> | 2019-06-24 15:00:15 +0000 |
commit | 84da38b0f11ca3db0a439e510514be780f3933ff (patch) | |
tree | 56532f4842abc1ad00ae57bc20ddc72cada59b4c /src/armnn/layers | |
parent | 9515c7ec4f4535fff2c8f2d3f88974474d3f3468 (diff) | |
download | armnn-84da38b0f11ca3db0a439e510514be780f3933ff.tar.gz |
IVGCVSW-3277 Refactor TensorHandle factory API
* Added backend support for multiple types of TensorHandle factories
* Refactored the backend API to enable new tensor strategies
* Added mechanism to determine memory strategies during optimization
* Perform a mem-copy only when direct access is not found
* Explicitly deleted the copy constructor of OutputSlot to prevent
  accidental local copies that would cause DisconnectAll to be
  called by the destructor
Change-Id: I7e812c8e5e6c1c20db1c5932749ac70fd93db7f8
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Diffstat (limited to 'src/armnn/layers')
-rw-r--r-- | src/armnn/layers/ConcatLayer.cpp | 26 | ||||
-rw-r--r-- | src/armnn/layers/ConcatLayer.hpp | 11 | ||||
-rw-r--r-- | src/armnn/layers/OutputLayer.hpp | 7 | ||||
-rw-r--r-- | src/armnn/layers/SplitterLayer.cpp | 27 | ||||
-rw-r--r-- | src/armnn/layers/SplitterLayer.hpp | 10 |
5 files changed, 70 insertions, 11 deletions
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp index 1d2641cd60..24051a24d2 100644 --- a/src/armnn/layers/ConcatLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -34,7 +34,8 @@ std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const Graph& graph, const return factory.CreateConcat(descriptor, PrepInfoAndDesc(descriptor, graph)); } -void ConcatLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) +template<typename FactoryType> +void ConcatLayer::CreateTensors(const FactoryType& factory) { //If sub tensors are supported then the concat //just needs to make sure that the outputs of the prev layer @@ -43,6 +44,8 @@ void ConcatLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact if (factory.SupportsSubTensors()) { + ITensorHandleFactory::FactoryId factoryId = GetOutputSlot(0).GetTensorHandleFactoryId(); + std::queue<ConcatLayer*> m_ConcatLayers; m_ConcatLayers.push(this); @@ -66,7 +69,8 @@ void ConcatLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact auto CreateSubTensor = [&]() { // Make sure quantization parameters are in the same space - if (parentInfo.IsTypeSpaceMatch(info)) + if (parentInfo.IsTypeSpaceMatch(info) && + factoryId == slot->GetTensorHandleFactoryId()) { return factory.CreateSubTensorHandle(*parentTensor, info.GetShape(), @@ -114,6 +118,24 @@ void ConcatLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact } } +void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry, + const IWorkloadFactory& workloadFactory) +{ + OutputSlot& slot = GetOutputSlot(0); + ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId(); + + if (factoryId == ITensorHandleFactory::LegacyFactoryId) + { + CreateTensors(workloadFactory); + } + else + { + ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId); + BOOST_ASSERT(handleFactory); + CreateTensors(*handleFactory); + } +} + ConcatLayer* 
ConcatLayer::Clone(Graph& graph) const { return CloneBase<ConcatLayer>(graph, m_Param, GetName()); diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp index 4268291916..eb7d93ce14 100644 --- a/src/armnn/layers/ConcatLayer.hpp +++ b/src/armnn/layers/ConcatLayer.hpp @@ -22,9 +22,11 @@ public: /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported /// otherwise creates tensor handlers. - /// @param [in] graph The graph where this layer can be found. + /// @param [in] registry Contains all the registered tensor handle factories available for use. /// @param [in] factory The workload factory which will create the workload. - virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; +// virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; + virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry, + const IWorkloadFactory& factory) override; /// Creates a dynamically-allocated copy of this layer. /// @param [in] graph The graph into which this layer is being cloned. @@ -50,6 +52,11 @@ protected: /// Default destructor ~ConcatLayer() = default; + +private: + template <typename FactoryType> + void CreateTensors(const FactoryType& factory); + }; } // namespace diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp index b86f8e2dfe..2aa2dbd6c9 100644 --- a/src/armnn/layers/OutputLayer.hpp +++ b/src/armnn/layers/OutputLayer.hpp @@ -22,11 +22,12 @@ public: /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported /// otherwise creates tensor handlers by default. Ignores parameters for Output type. - /// @param [in] graph The graph where this layer can be found. + /// @param [in] registry Contains all the registered tensor handle factories available for use. /// @param [in] factory The workload factory which will create the workload. 
- virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override + virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry, + const IWorkloadFactory& factory) override { - boost::ignore_unused(graph, factory); + boost::ignore_unused(registry, factory); } /// Creates a dynamically-allocated copy of this layer. diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp index 4a6b2220a7..dc04b3fd15 100644 --- a/src/armnn/layers/SplitterLayer.cpp +++ b/src/armnn/layers/SplitterLayer.cpp @@ -32,7 +32,8 @@ std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const Graph& graph, con return factory.CreateSplitter(descriptor, PrepInfoAndDesc(descriptor, graph)); } -void SplitterLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) +template<typename FactoryType> +void SplitterLayer::CreateTensors(const FactoryType& factory) { //If sub tensors are supported than all the "splitter" need to do is to //set the outputs to be appropriate sub tensors of the input. 
@@ -40,6 +41,7 @@ void SplitterLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fa if (useSubTensors) { + const OutputSlot* slot = GetInputSlots()[0].GetConnectedOutputSlot(); const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler(); const TensorInfo& parentInfo = outputHandler.GetTensorInfo(); @@ -53,10 +55,13 @@ void SplitterLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fa { const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo(); + OutputSlot& outSlot = GetOutputSlot(i); + ITensorHandleFactory::FactoryId factoryId = outSlot.GetTensorHandleFactoryId(); auto CreateSubTensor = [&]() { // Make sure quantization parameters are in the same space - if (parentInfo.IsTypeSpaceMatch(info)) + if (parentInfo.IsTypeSpaceMatch(info) && + factoryId == slot->GetTensorHandleFactoryId()) { return factory.CreateSubTensorHandle(*inputData, info.GetShape(), @@ -95,6 +100,24 @@ void SplitterLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fa } } +void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry, + const IWorkloadFactory& workloadFactory) +{ + OutputSlot& slot = GetOutputSlot(0); + ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId(); + + if (factoryId == ITensorHandleFactory::LegacyFactoryId) + { + CreateTensors(workloadFactory); + } + else + { + ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId); + BOOST_ASSERT(handleFactory); + CreateTensors(*handleFactory); + } +} + SplitterLayer* SplitterLayer::Clone(Graph& graph) const { return CloneBase<SplitterLayer>(graph, m_Param, GetName()); diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp index 19b05562e8..9c684d479f 100644 --- a/src/armnn/layers/SplitterLayer.hpp +++ b/src/armnn/layers/SplitterLayer.hpp @@ -22,9 +22,11 @@ public: /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported /// 
otherwise creates tensor handlers. - /// @param [in] graph The graph where this layer can be found. + /// @param [in] registry Contains all the registered tensor handle factories available for use. /// @param [in] factory The workload factory which will create the workload. - virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; + //virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override; + virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry, + const IWorkloadFactory& factory) override; /// Creates a dynamically-allocated copy of this layer. /// @param [in] graph The graph into which this layer is being cloned. @@ -50,6 +52,10 @@ protected: /// Default destructor ~SplitterLayer() = default; + +private: + template <typename FactoryType> + void CreateTensors(const FactoryType& factory); }; } // namespace |