diff options
author | Matthew Sloyan <matthew.sloyan@arm.com> | 2022-06-30 17:13:04 +0100 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-07-27 15:52:10 +0100 |
commit | 2d213a759e68f753ef4696e02a8535f7edfe421d (patch) | |
tree | f4003d0a60e907937e04d96fc434c0bfac596f86 /src/armnn/WorkingMemHandle.cpp | |
parent | 28aa6691accfd78c5eb5c4356316220d0e82ddef (diff) | |
download | armnn-2d213a759e68f753ef4696e02a8535f7edfe421d.tar.gz |
IVGCVSW-6620 Update the async api to use ExecutionData
* ExecutionData holds a void* which can be assigned to data required
for execution in a backend. WorkingMemDescriptors are used in the Ref
backend which hold TensorHandles for inputs and outputs.
* Updated ExecuteAsync functions to take ExecutionData.
* Added CreateExecutionData and UpdateExecutionData to IBackendInternal.
* Streamlined experimental IWorkingMemHandle API by removing map related
function and unused m_WorkingMemDescriptorMap from WorkingMemHandle.
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I54b0aab12872011743a141eb42dae200227769af
Diffstat (limited to 'src/armnn/WorkingMemHandle.cpp')
-rw-r--r-- | src/armnn/WorkingMemHandle.cpp | 19 |
1 file changed, 15 insertions, 4 deletions
diff --git a/src/armnn/WorkingMemHandle.cpp b/src/armnn/WorkingMemHandle.cpp index 70bdd59373..e800c428e6 100644 --- a/src/armnn/WorkingMemHandle.cpp +++ b/src/armnn/WorkingMemHandle.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -19,20 +19,22 @@ WorkingMemHandle::WorkingMemHandle(NetworkId networkId, std::vector<InputMemDescriptorCoords> inputLayerInfo, std::vector<OutputMemDescriptorCoords> outputLayerInfo, std::vector<WorkingMemDescriptor> workingMemDescriptors, - std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap, std::unique_ptr<MemoryManager> memoryManager, std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory, std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles, - std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles) + std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles, + std::vector<std::pair<BackendId, ExecutionData>> executionDataVec, + BackendPtrMap* backends) : m_NetworkId(networkId) , m_WorkingMemDescriptors(workingMemDescriptors) - , m_WorkingMemDescriptorMap(workingMemDescriptorMap) , m_MemoryManager(std::move(memoryManager)) , m_TensorMemory(std::move(tensorMemory)) , m_ManagedTensorHandles(std::move(managedTensorHandles)) , m_UnmanagedTensorHandles(std::move(unmanagedTensorHandles)) , m_InputSize(numeric_cast<DifferenceType>(inputLayerInfo.size())) , m_IsAllocated(false) + , m_ExecutionDataVec(executionDataVec) + , m_Backends(backends) { for (const auto& inputInfo : inputLayerInfo) { @@ -109,6 +111,15 @@ void WorkingMemHandle::Allocate() { m_ManagedTensorHandles[i]->Import(m_TensorMemory[i].first->m_Data, m_TensorMemory[i].second); } + + // Assign previously allocated ExecutionData. Needs to be assigned after allocation so the void* are allocated. 
+ for (unsigned int i = 0; i < m_ExecutionDataVec.size(); ++i) + { + auto& backend = m_Backends->at(m_ExecutionDataVec[i].first); + + ExecutionData executionData = backend->CreateExecutionData(GetWorkingMemDescriptorAt(i)); + m_ExecutionDataVec[i].second = executionData; + } } void WorkingMemHandle::Free() |