diff options
author | Ferran Balaguer <ferran.balaguer@arm.com> | 2019-08-07 15:14:56 +0100 |
---|---|---|
committer | Ferran Balaguer Arm <ferran.balaguer@arm.com> | 2019-08-20 14:31:07 +0000 |
commit | bfeb2711da172b26931c58af7b15d434ef49e24e (patch) | |
tree | 3516b24857effe4c98737f48026dabf75d1f360a /src/armnn/LoadedNetwork.cpp | |
parent | aec942c3f258db46e0fb8054d39c0e7c3c411728 (diff) | |
download | armnn-bfeb2711da172b26931c58af7b15d434ef49e24e.tar.gz |
IVGCVSW-3606 Support memory import for Reference backend
Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
Change-Id: I94bd191f88e0911ad4e4727610e81cd7afa95512
Diffstat (limited to 'src/armnn/LoadedNetwork.cpp')
-rw-r--r-- | src/armnn/LoadedNetwork.cpp | 37 |
1 file changed, 20 insertions, 17 deletions
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index f5f79f3940..5b64085869 100644 --- a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -444,26 +444,29 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten // b) The tensor has zero padding // c) There is only one connection to the OutputSlot and it is to an OutputLayer. // d) The output pointer is allocated via malloc. (Other types will be supported in a later release) - if (layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetNumConnections() == 1) + if (layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOwningLayer().GetType() != LayerType::Input) { - MemorySourceFlags importFlags = inputTensorHandle->GetImportFlags(); - if (CheckFlag(importFlags, MemorySource::Malloc)) + if (layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetNumConnections() == 1) { - void* mem = tensorHandle->Map(false); - bool importOk = inputTensorHandle->Import(mem, MemorySource::Malloc); - tensorHandle->Unmap(); - - if (importOk) + MemorySourceFlags importFlags = inputTensorHandle->GetImportFlags(); + if (CheckFlag(importFlags, MemorySource::Malloc)) { - // Insert synchronization workload - MemSyncQueueDescriptor syncDesc; - syncDesc.m_Inputs.push_back(inputTensorHandle); - info.m_InputTensorInfos.push_back(inputTensorInfo); - auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info); - BOOST_ASSERT_MSG(syncWorkload, "No sync workload created"); - m_OutputQueue.push_back(move(syncWorkload)); - - return; //No need to add the output workload below + void *mem = tensorHandle->Map(false); + bool importOk = inputTensorHandle->Import(mem, MemorySource::Malloc); + tensorHandle->Unmap(); + + if (importOk) + { + // Insert synchronization workload + MemSyncQueueDescriptor syncDesc; + syncDesc.m_Inputs.push_back(inputTensorHandle); + info.m_InputTensorInfos.push_back(inputTensorInfo); + auto syncWorkload = 
std::make_unique<SyncMemGenericWorkload>(syncDesc, info); + BOOST_ASSERT_MSG(syncWorkload, "No sync workload created"); + m_OutputQueue.push_back(move(syncWorkload)); + + return; //No need to add the output workload below + } } } } |