20 m_TensorHandle = descriptor.m_Inputs[0];
26 m_TensorHandle->Map(true);
27 m_TensorHandle->Unmap();
35 workingMemDescriptor->m_Inputs[0]->Map(true);
36 workingMemDescriptor->m_Inputs[0]->Unmap();
void Execute() const override
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
std::vector< ITensorHandle * > m_Inputs
void ExecuteAsync(ExecutionData &executionData) override
SyncMemGenericWorkload(const MemSyncQueueDescriptor &descriptor, const WorkloadInfo &info)
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
virtual void Unmap() const =0
Unmap the tensor data.
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs