const float * GetInputTensorDataFloat(unsigned int idx, const PayloadType &data)
CPU Execution: Reference C++ kernels.
const QueueDescriptor m_Data
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
Float32 helpers.
Half * GetOutputTensorDataHalf(unsigned int idx, const PayloadType &data)
Copyright (c) 2020 ARM Limited.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer)
Converts a buffer of FP32 values to FP16, and stores in the given dstFloat16Buffer.
virtual void Execute() const override
std::vector< ITensorHandle * > m_Inputs
unsigned int GetNumElements() const