m_TensorHandle = descriptor.m_Inputs[0];
m_TensorHandle->Map(true);
m_TensorHandle->Unmap();
void Execute() const override
void ExecuteAsync(WorkingMemDescriptor &descriptor) override
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
std::vector< ITensorHandle * > m_Inputs
SyncMemGenericWorkload(const MemSyncQueueDescriptor &descriptor, const WorkloadInfo &info)
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
virtual void Unmap() const =0
Unmap the tensor data.
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs