ArmNN 20.05: RefFakeQuantizationFloat32Workload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefFakeQuantizationFloat32Workload.hpp"

#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

#include <boost/numeric/conversion/cast.hpp>

namespace armnn
{

void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max)
{
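    // Map the float range [min, max] onto the 256 uint8 levels: scale is the
    // step between adjacent levels and offset is the zero point of the range.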
    float scale = (max - min) / 255.f;
    int32_t offset = boost::numeric_cast<int32_t>((-min * 255.f) / (max - min));

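    // Quantize every element to uint8 and write the resulting level back as a
    // float; the values are not dequantized back into the original range here.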
    for (uint32_t i = 0; i < numElements; i++)
    {
        outputData[i] = static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset));
    }

}

void RefFakeQuantizationFloat32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);

    const float* inputData = GetInputTensorDataFloat(0, m_Data);
    float* outputData = GetOutputTensorDataFloat(0, m_Data);
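    // Fake-quantize the whole input tensor element-wise, using the min/max
    // range carried in the layer's descriptor parameters.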
    FakeQuantization(inputData, outputData, inputInfo.GetNumElements(),
                     m_Data.m_Parameters.m_Min,
                     m_Data.m_Parameters.m_Max);
}

} //namespace armnn
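
To make the arithmetic above concrete, the following is a minimal standalone sketch (not ArmNN code) of the same affine quantization step for a single value. It assumes armnn::Quantize<uint8_t> follows the usual convention of rounding value / scale, adding the offset, and clamping to [0, 255]; the QuantizeU8 helper and the example min/max range are hypothetical and introduced only for illustration.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for armnn::Quantize<uint8_t>: round to the nearest
// level, shift by the zero-point offset, clamp into the uint8 range.
uint8_t QuantizeU8(float value, float scale, int32_t offset)
{
    float q = std::round(value / scale) + static_cast<float>(offset);
    q = std::min(std::max(q, 0.0f), 255.0f);
    return static_cast<uint8_t>(q);
}

int main()
{
    const float min = -1.0f;
    const float max =  1.0f;

    // Same scale/offset computation as FakeQuantization above.
    const float scale    = (max - min) / 255.0f;
    const int32_t offset = static_cast<int32_t>((-min * 255.0f) / (max - min));

    for (float v : {-1.0f, -0.5f, 0.0f, 0.5f, 1.0f})
    {
        std::cout << v << " -> " << static_cast<int>(QuantizeU8(v, scale, offset)) << "\n";
    }
    return 0;
}

With this assumed range of [-1, 1], an input of 0.0f lands on level 127, which matches the computed offset (zero point), and the endpoints map to levels 0 and 255 respectively.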