12 #include <boost/numeric/conversion/cast.hpp> 17 void FakeQuantization(
const float* inputData,
float* outputData, uint32_t numElements,
float min,
float max)
19 float scale = (max - min) / 255.f;
20 int32_t offset = boost::numeric_cast<int32_t>((-min * 255.f) / (max - min));
22 for (uint32_t i = 0; i < numElements; i++)
24 outputData[i] =
static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset));
39 m_Data.m_Parameters.m_Max);
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
void FakeQuantization(const float *inputData, float *outputData, uint32_t numElements, float min, float max)
float * GetOutputTensorDataFloat(unsigned int idx, const PayloadType &data)
const float * GetInputTensorDataFloat(unsigned int idx, const PayloadType &data)
virtual void Execute() const override
const QueueDescriptor m_Data
CPU Execution: Reference C++ kernels.
unsigned int GetNumElements() const
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
Float32 helper functions.