ArmNN
 22.11
MemSyncWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "MemSyncWorkload.hpp"

#include <ResolveType.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <cstring>
13 namespace armnn
14 {
15 
17  const WorkloadInfo& info)
18  : BaseWorkload<MemSyncQueueDescriptor>(descriptor, info)
19 {
20  m_TensorHandle = descriptor.m_Inputs[0];
21 }
22 
24 {
25  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "SyncMemGeneric_Execute");
26  m_TensorHandle->Map(true);
27  m_TensorHandle->Unmap();
28 }
29 
31 {
32  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "SyncMemGeneric_Execute_WorkingMemDescriptor");
33 
34  WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
35  workingMemDescriptor->m_Inputs[0]->Map(true);
36  workingMemDescriptor->m_Inputs[0]->Unmap();
37 }
38 
39 } //namespace armnn
void Execute() const override
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
void ExecuteAsync(ExecutionData &executionData) override
SyncMemGenericWorkload(const MemSyncQueueDescriptor &descriptor, const WorkloadInfo &info)
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
virtual void Unmap() const =0
Unmap the tensor data.
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs