diff options
author | Derek Lamberti <derek.lamberti@arm.com> | 2019-06-13 11:40:08 +0100 |
---|---|---|
committer | Derek Lamberti <derek.lamberti@arm.com> | 2019-06-24 15:00:15 +0000 |
commit | 84da38b0f11ca3db0a439e510514be780f3933ff (patch) | |
tree | 56532f4842abc1ad00ae57bc20ddc72cada59b4c /src/armnn/LoadedNetwork.cpp | |
parent | 9515c7ec4f4535fff2c8f2d3f88974474d3f3468 (diff) | |
download | armnn-84da38b0f11ca3db0a439e510514be780f3933ff.tar.gz |
IVGCVSW-3277 Refactor TensorHandle factory API
* Added backend support for multiple types of TensorHandle factories
* Refactored the backend API to enable new tensor strategies
* Added mechanism to determine memory strategies during optimization
* Perform mem-copy only when Direct access is not found
* Explicitly deleted the copy-constructor from OutputSlot to prevent
  accidental local copies, which would cause DisconnectAll to be
  called by the destructor
Change-Id: I7e812c8e5e6c1c20db1c5932749ac70fd93db7f8
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
Diffstat (limited to 'src/armnn/LoadedNetwork.cpp')
-rw-r--r-- | src/armnn/LoadedNetwork.cpp | 36 |
1 file changed, 27 insertions, 9 deletions
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index 3c7dfb07a9..7873e48780 100644 --- a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -85,19 +85,37 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net) //(for example the splitter and concat layers). for (auto&& layer : order) { - auto const& backend = layer->GetBackendId(); - if (m_Backends.count(backend) == 0) + auto const& backendId = layer->GetBackendId(); + if (m_Backends.count(backendId) == 0) { - auto createBackend = BackendRegistryInstance().GetFactory(backend); - auto it = m_Backends.emplace(std::make_pair(backend, createBackend())); + auto createBackend = BackendRegistryInstance().GetFactory(backendId); + auto it = m_Backends.emplace(std::make_pair(backendId, createBackend())); - IBackendInternal::IMemoryManagerSharedPtr memoryManager = it.first->second->CreateMemoryManager(); - auto workloadFactory = it.first->second->CreateWorkloadFactory(memoryManager); + IBackendInternal* backend = it.first->second.get(); - m_WorkloadFactories.emplace(std::make_pair(backend, - std::make_pair(std::move(workloadFactory), memoryManager))); + if (backend->SupportsTensorAllocatorAPI()) + { + backend->RegisterTensorHandleFactories(m_TensorHandleFactoryRegistry); + + auto workloadFactory = backend->CreateWorkloadFactory(); + m_WorkloadFactories.emplace( + std::make_pair(backendId, std::make_pair(std::move(workloadFactory), nullptr))); + } + else + { + IBackendInternal::IMemoryManagerSharedPtr memoryManager = backend->CreateMemoryManager(); + auto workloadFactory = backend->CreateWorkloadFactory(memoryManager); + + m_WorkloadFactories.emplace( + std::make_pair(backendId, std::make_pair(std::move(workloadFactory), memoryManager))); + } } - layer->CreateTensorHandles(m_OptimizedNetwork->GetGraph(), GetWorkloadFactory(*layer)); + } + + for (auto&& layer : order) + { + auto& workloadFacory = GetWorkloadFactory(*layer); + 
layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFacory); } //Then create workloads. |