ArmNN
 23.02
MemImportWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <ResolveType.hpp>
7 
10 
11 #include <cstring>
12 
13 namespace armnn
14 {
15 
17  const WorkloadInfo& info)
19 {
    // Cache the single input/output handle pair used by Execute().
    // NOTE(review): only element [0] of m_Inputs/m_Outputs is taken — this
    // workload presumably handles exactly one input and one output tensor;
    // confirm the descriptor is validated upstream before indexing.
20  m_TensorHandlePairs = std::make_pair(descriptor.m_Inputs[0], descriptor.m_Outputs[0]);
21 }
22 
24 {
25  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ImportMemGeneric_Execute");
26 
    // Map the input tensor handle (the const_cast shows Map() returns a
    // const pointer here) and pass that pointer to the output handle's
    // Import() tagged as MemorySource::Malloc — i.e. the output is told to
    // adopt the input's CPU-accessible memory rather than receive a copy.
    // assumes Map(true) means a blocking map — TODO confirm against
    // ITensorHandle::Map.
27  m_TensorHandlePairs.second->Import(const_cast<void*>(m_TensorHandlePairs.first->Map(true)), MemorySource::Malloc);
    // Release the mapping obtained above; the imported pointer's lifetime is
    // presumably guaranteed by the handle owner — verify against callers.
28  m_TensorHandlePairs.first->Unmap();
29 }
30 
31 } //namespace armnn
armnn::ImportMemGenericWorkload::ImportMemGenericWorkload
ImportMemGenericWorkload(const MemImportQueueDescriptor &descriptor, const WorkloadInfo &info)
Definition: MemImportWorkload.cpp:16
armnn::MemorySource::Malloc
@ Malloc
TensorHandle.hpp
armnn::ImportMemGenericWorkload::Execute
void Execute() const override
Definition: MemImportWorkload.cpp:23
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::BaseWorkload
Definition: Workload.hpp:33
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
armnn::Compute::Undefined
@ Undefined
armnn::MemImportQueueDescriptor
Definition: WorkloadData.hpp:94
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
ResolveType.hpp
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
MemImportWorkload.hpp
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26
armnn::BoostLogSeverityMapping::info
@ info