author    David Monahan <david.monahan@arm.com>  2019-08-20 11:25:29 +0100
committer David Monahan <david.monahan@arm.com>  2019-09-24 10:50:30 +0000
commit    3fb7e105ae62cbfb3ebf1edebb90e2b6672b22aa (patch)
tree      47793a736e1fb53d51b4c0fd755f4e24f7f93d98 /src/armnn
parent    93667b1d7c361df68bdb1d733f17aba3ba34e046 (diff)
download  armnn-3fb7e105ae62cbfb3ebf1edebb90e2b6672b22aa.tar.gz
IVGCVSW-3623 Implement NeonTensorHandle::Import
Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I7213788725fd4e4cf1176998604e999d0b7ed6cc
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/Layer.cpp                          9
-rw-r--r--  src/armnn/Layer.hpp                          4
-rw-r--r--  src/armnn/LoadedNetwork.cpp                 17
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp             4
-rw-r--r--  src/armnn/layers/ConcatLayer.hpp             4
-rw-r--r--  src/armnn/layers/OutputLayer.hpp             6
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp           4
-rw-r--r--  src/armnn/layers/SplitterLayer.hpp           4
-rw-r--r--  src/armnn/test/TensorHandleStrategyTest.cpp 12
9 files changed, 47 insertions, 17 deletions
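
This change threads a new IsMemoryManaged flag through Layer::CreateTensorHandles so that, when memory import is enabled, input layers receive tensor handles whose storage is not owned by the pooled memory manager and can therefore be imported. As orientation before the diff, here is a minimal caller-side sketch; the helper function is hypothetical and the include paths approximate, but the Layer, registry, and factory names match the code below.

    // Hypothetical helper, not part of this change: shows how callers are
    // expected to use the new IsMemoryManaged argument.
    #include "Layer.hpp"                                      // armnn::Layer, armnn::LayerType
    #include <backendsCommon/TensorHandleFactoryRegistry.hpp> // include paths approximate
    #include <backendsCommon/WorkloadFactory.hpp>

    namespace armnn
    {
    void CreateHandlesForLayer(Layer& layer,
                               const TensorHandleFactoryRegistry& registry,
                               const IWorkloadFactory& factory,
                               bool importEnabled)
    {
        if (layer.GetType() == LayerType::Input)
        {
            // Import path: pass IsMemoryManaged = false so the handle's storage
            // is not tied to the inter-layer memory manager.
            layer.CreateTensorHandles(registry, factory, !importEnabled);
        }
        else
        {
            // All other layers keep the default (IsMemoryManaged = true).
            layer.CreateTensorHandles(registry, factory);
        }
    }
    } // namespace armnn
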
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 1e384336c9..dbeda22ca0 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -244,7 +244,9 @@ void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector, const G
}
}
-void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry, const IWorkloadFactory& workloadFactory)
+void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
+ const IWorkloadFactory& workloadFactory,
+ const bool IsMemoryManaged)
{
for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
{
@@ -255,14 +257,13 @@ void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry, con
OutputHandler& handler = GetOutputHandler(idx);
if (factoryId == ITensorHandleFactory::LegacyFactoryId)
{
- handler.CreateTensorHandles(workloadFactory);
+ handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
}
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
BOOST_ASSERT(handleFactory);
-
- handler.CreateTensorHandles(*handleFactory);
+ handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
}
}
}
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index c571e50a95..5f2c070681 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -267,7 +267,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
- virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry, const IWorkloadFactory& factory);
+ virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
+ const IWorkloadFactory& factory,
+ const bool IsMemoryManaged = true);
/// Creates a dynamically-allocated copy of this layer.
/// @param graph - The Graph into which this Layer is being cloned.
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 1000eceda0..7ee4e612e0 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -120,8 +120,21 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
for (auto&& layer : order)
{
- auto& workloadFacory = GetWorkloadFactory(*layer);
- layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFacory);
+ auto& workloadFactory = GetWorkloadFactory(*layer);
+
+ switch (layer->GetType())
+ {
+ case LayerType::Input:
+ {
+ // If IsImportEnabled is true then we need to set IsMemoryManaged to false when creating TensorHandles
+ layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory, !m_IsImportEnabled);
+ break;
+ }
+ default:
+ {
+ layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory);
+ }
+ }
}
//Then create workloads.
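
The Input special case above only has an effect if the backend tensor handle factories honour the flag. A rough sketch of what an import-capable factory might do with it; ExampleTensorHandleFactory, ExampleTensorHandle, and m_MemoryManager are placeholders, not the actual Neon backend implementation.

    // Placeholder factory sketch: when IsMemoryManaged is false, no memory
    // group is attached, leaving the handle free to import external memory.
    std::unique_ptr<ITensorHandle> ExampleTensorHandleFactory::CreateTensorHandle(
        const TensorInfo& tensorInfo, const bool IsMemoryManaged) const
    {
        auto handle = std::make_unique<ExampleTensorHandle>(tensorInfo);
        if (IsMemoryManaged)
        {
            // Managed path: storage is pooled by the inter-layer memory manager.
            handle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
        }
        // Unmanaged path: memory is expected to be imported later (e.g. via
        // ITensorHandle::Import) rather than allocated and pooled here.
        return handle;
    }
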
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 24051a24d2..9b1785850a 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -119,8 +119,10 @@ void ConcatLayer::CreateTensors(const FactoryType& factory)
}
void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
- const IWorkloadFactory& workloadFactory)
+ const IWorkloadFactory& workloadFactory,
+ const bool IsMemoryManaged)
{
+ boost::ignore_unused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index e8ff2e452b..10a7fd8e74 100644
--- a/src/armnn/layers/ConcatLayer.hpp
+++ b/src/armnn/layers/ConcatLayer.hpp
@@ -24,8 +24,10 @@ public:
/// otherwise creates tensor handlers.
/// @param [in] registry Contains all the registered tensor handle factories available for use.
/// @param [in] factory The workload factory which will create the workload.
+ /// @param [in] IsMemoryManaged Determine whether or not to assign a memory manager during creation
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
- const IWorkloadFactory& factory) override;
+ const IWorkloadFactory& factory,
+ const bool IsMemoryManaged = true) override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 2aa2dbd6c9..c9615cca66 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -24,10 +24,12 @@ public:
/// otherwise creates tensor handlers by default. Ignores parameters for Output type.
/// @param [in] registry Contains all the registered tensor handle factories available for use.
/// @param [in] factory The workload factory which will create the workload.
+ /// @param [in] IsMemoryManaged Determine whether or not to assign a memory manager during creation
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
- const IWorkloadFactory& factory) override
+ const IWorkloadFactory& factory,
+ const bool IsMemoryManaged = true) override
{
- boost::ignore_unused(registry, factory);
+ boost::ignore_unused(registry, factory, IsMemoryManaged);
}
/// Creates a dynamically-allocated copy of this layer.
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index dc04b3fd15..e8452462f3 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -101,8 +101,10 @@ void SplitterLayer::CreateTensors(const FactoryType& factory)
}
void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
- const IWorkloadFactory& workloadFactory)
+ const IWorkloadFactory& workloadFactory,
+ const bool IsMemoryManaged)
{
+ boost::ignore_unused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index 9c684d479f..26d5b76a2d 100644
--- a/src/armnn/layers/SplitterLayer.hpp
+++ b/src/armnn/layers/SplitterLayer.hpp
@@ -24,9 +24,11 @@ public:
/// otherwise creates tensor handlers.
/// @param [in] registry Contains all the registered tensor handle factories available for use.
/// @param [in] factory The workload factory which will create the workload.
+ /// @param [in] IsMemoryManaged Determine whether or not to assign a memory manager during creation
//virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
- const IWorkloadFactory& factory) override;
+ const IWorkloadFactory& factory,
+ const bool IsMemoryManaged = true) override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
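
Note that each override above repeats the IsMemoryManaged = true default instead of relying on the base class. In C++ a default argument is chosen from the static type of the expression at the call site, not from the dynamic type, so keeping the defaults identical in Layer and in the overriding layer classes avoids surprising behaviour. A small standalone illustration of that language rule:

    // Standalone demonstration of why the overrides repeat "= true":
    // default arguments come from the static type, not the dynamic type.
    #include <iostream>

    struct Base
    {
        virtual void Create(bool managed = true)  { std::cout << "Base "    << managed << "\n"; }
        virtual ~Base() = default;
    };

    struct Derived : Base
    {
        // If this default were omitted or differed, calling through a Base&
        // and through a Derived could pick up different default values.
        void Create(bool managed = true) override { std::cout << "Derived " << managed << "\n"; }
    };

    int main()
    {
        Derived d;
        Base& b = d;
        b.Create(); // prints "Derived 1" -- dynamic dispatch, Base's default argument
        d.Create(); // prints "Derived 1" -- Derived's default argument
    }
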
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 3c53b13e1a..ceb6e4dbc2 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -45,13 +45,15 @@ public:
return nullptr;
}
- std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool IsMemoryManaged) const override
{
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
- DataLayout dataLayout) const override
+ DataLayout dataLayout,
+ const bool IsMemoryManaged) const override
{
return nullptr;
}
@@ -83,13 +85,15 @@ public:
return nullptr;
}
- std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+ const bool IsMemoryManaged) const override
{
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
- DataLayout dataLayout) const override
+ DataLayout dataLayout,
+ const bool IsMemoryManaged) const override
{
return nullptr;
}
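
At the public API level this feeds into the runtime's memory-import option: when a network is loaded with import enabled, LoadedNetwork (see above) requests unmanaged handles for input layers so caller-owned buffers can be consumed without an extra copy. The sketch below assumes the runtime of this era exposes INetworkProperties(importEnabled, exportEnabled) and a matching LoadNetwork overload; treat those signatures as assumptions rather than a verified example.

    // Assumed runtime-level usage; the INetworkProperties constructor and the
    // LoadNetwork overload are assumptions based on the m_IsImportEnabled flag
    // referenced in LoadedNetwork.cpp above.
    #include <armnn/ArmNN.hpp> // assumed umbrella header; adjust to the release in use
    #include <string>
    #include <vector>

    int main()
    {
        using namespace armnn;

        // Trivial network (input -> output) just to have something to load.
        INetworkPtr network = INetwork::Create();
        IConnectableLayer* input  = network->AddInputLayer(0);
        IConnectableLayer* output = network->AddOutputLayer(0);
        input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));

        IRuntime::CreationOptions options;
        IRuntimePtr runtime = IRuntime::Create(options);

        std::vector<BackendId> backends = {Compute::CpuAcc};
        IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

        NetworkId netId;
        std::string errorMessage;
        INetworkProperties networkProperties(true, false); // importEnabled, exportEnabled
        runtime->LoadNetwork(netId, std::move(optNet), errorMessage, networkProperties);

        // With import enabled, input layers get unmanaged tensor handles, so
        // suitably aligned caller-owned input buffers can be imported rather
        // than copied into ArmNN-managed memory at EnqueueWorkload time.
        return 0;
    }
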