14 inline const TensorInfo&
GetTensorInfo(
const ITensorHandle* tensorHandle)
17 const SampleTensorHandle* sampleTensorHandle =
18 static_cast<const SampleTensorHandle*
>(tensorHandle);
19 return sampleTensorHandle->GetTensorInfo();
// NOTE(review): extraction fragments — these appear to be the return
// statements of the const (input) and non-const (output) float accessor
// helpers whose signatures are listed further down in this extract
// (GetInputTensorData / GetOutputTensorData); the enclosing function
// bodies are not fully visible here, so this span is documented in place
// rather than rewritten.
// Maps the handle's backing memory and exposes it as read-only float data.
25 return reinterpret_cast<const float*
>(tensorHandle->
Map());
// Non-const variant: maps the handle and exposes writable float data.
31 return reinterpret_cast<float*
>(tensorHandle->
Map());
// NOTE(review): fragment of an element-wise addition loop — presumably the
// core of Execute() for this addition workload; `num`, `inputData0`,
// `inputData1` and `outputData` are defined in the surrounding function,
// which is not visible in this extract. TODO confirm against the full file.
48 for (
unsigned int i = 0; i < num; ++i)
50 outputData[i] = inputData0[i] + inputData1[i];
const DataType * GetInputTensorData(unsigned int idx, const PayloadType &data)
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
virtual const void * Map(bool blocking = true) const = 0
SampleDynamicAdditionWorkload(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info)
const AdditionQueueDescriptor m_Data
void Execute() const override
unsigned int GetNumElements() const
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers