14 inline const TensorInfo&
GetTensorInfo(
const ITensorHandle* tensorHandle)
17 const SampleTensorHandle* sampleTensorHandle =
18 static_cast<const SampleTensorHandle*
>(tensorHandle);
19 return sampleTensorHandle->GetTensorInfo();
25 return reinterpret_cast<const float*
>(tensorHandle->
Map());
31 return reinterpret_cast<float*
>(tensorHandle->
Map());
48 for (
unsigned int i = 0; i < num; ++i)
50 outputData[i] = inputData0[i] + inputData1[i];
const DataType * GetInputTensorData(unsigned int idx, const PayloadType &data)
const AdditionQueueDescriptor m_Data
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
Float32 helper functions for mapping input/output tensor data.
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
Copyright (c) 2020 ARM Limited.
SampleDynamicAdditionWorkload(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info)
void Execute() const override
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
std::vector< ITensorHandle * > m_Outputs
Contains information about inputs and outputs to a layer.
std::vector< ITensorHandle * > m_Inputs
unsigned int GetNumElements() const