ArmNN
 22.11
MemImportWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <ResolveType.hpp>

#include <cstring>

namespace armnn
{

ImportMemGenericWorkload::ImportMemGenericWorkload(const MemImportQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info)
    : BaseWorkload<MemImportQueueDescriptor>(descriptor, info)
{
    // Pair the single input tensor handle with the single output tensor handle.
    m_TensorHandlePairs = std::make_pair(descriptor.m_Inputs[0], descriptor.m_Outputs[0]);
}

void ImportMemGenericWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ImportMemGeneric_Execute");

    // Import the mapped input memory directly into the output tensor handle
    // (zero-copy), then release the mapping on the input handle.
    m_TensorHandlePairs.second->Import(const_cast<void*>(m_TensorHandlePairs.first->Map(true)), MemorySource::Malloc);
    m_TensorHandlePairs.first->Unmap();
}

} // namespace armnn
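
The workload above moves data between two tensor handles by importing the input's mapped pointer into the output handle rather than copying bytes (in contrast to the memcpy-style MemCopy workload). The short, self-contained sketch below illustrates that zero-copy pattern under simplified assumptions: TensorHandleLike and ImportableHandle are hypothetical stand-ins chosen for illustration, not the real armnn::ITensorHandle API.

// Illustrative sketch only: TensorHandleLike and ImportableHandle are
// hypothetical stand-ins, not the real armnn::ITensorHandle interface.
#include <cstdio>
#include <vector>

struct TensorHandleLike
{
    virtual ~TensorHandleLike() = default;
    virtual void* Map() = 0;               // expose the underlying buffer
    virtual void Unmap() {}                // release the mapping (no-op here)
    virtual bool Import(void* memory) = 0; // adopt external memory without copying
};

struct ImportableHandle : TensorHandleLike
{
    void* m_Memory = nullptr;
    void* Map() override { return m_Memory; }
    bool Import(void* memory) override
    {
        m_Memory = memory;                 // zero-copy: just record the pointer
        return true;
    }
};

int main()
{
    std::vector<float> sourceBuffer(4, 1.0f); // stands in for the mapped input tensor

    ImportableHandle input;
    input.Import(sourceBuffer.data());

    // Mirrors the shape of ImportMemGenericWorkload::Execute(): map the input
    // handle and hand its pointer to the output handle instead of copying.
    ImportableHandle output;
    output.Import(input.Map());
    input.Unmap();

    // Both handles now alias the same storage; no data was copied.
    std::printf("aliased: %s\n", output.Map() == sourceBuffer.data() ? "yes" : "no");
    return 0;
}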