From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 ...quantization_float32_workload_8cpp_source.xhtml | 130 +++++++++++++++++++++
 1 file changed, 130 insertions(+)
 create mode 100644 20.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml

diff --git a/20.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml b/20.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml
new file mode 100644
index 0000000000..b715f8d99e
--- /dev/null
+++ b/20.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml
@@ -0,0 +1,130 @@

ArmNN: src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp Source File
RefFakeQuantizationFloat32Workload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefFakeQuantizationFloat32Workload.hpp"

#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

#include <boost/numeric/conversion/cast.hpp>

namespace armnn
{

void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max)
{
    float scale = (max - min) / 255.f;
    int32_t offset = boost::numeric_cast<int32_t>((-min * 255.f) / (max - min));

    for (uint32_t i = 0; i < numElements; i++)
    {
        outputData[i] = static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset));
    }
}

void RefFakeQuantizationFloat32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);

    const float* inputData = GetInputTensorDataFloat(0, m_Data);
    float* outputData = GetOutputTensorDataFloat(0, m_Data);
    FakeQuantization(inputData, outputData, inputInfo.GetNumElements(),
                     m_Data.m_Parameters.m_Min,
                     m_Data.m_Parameters.m_Max);
}

} //namespace armnn
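
For context, the sketch below walks the same fake-quantization round trip outside ArmNN. It is illustrative only: the [min, max] range and the input values are invented for the example, and armnn::Quantize<uint8_t> is approximated inline as round/shift/clamp rather than by calling the real ArmNN helper.

// Standalone sketch of the fake-quantization step performed by the workload.
// Illustrative only: armnn::Quantize<uint8_t> is approximated inline here.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    // Example range; the real workload reads these from m_Data.m_Parameters.
    const float min = -1.0f;
    const float max =  1.0f;

    // Same scale/offset derivation as FakeQuantization() above.
    const float   scale  = (max - min) / 255.f;                                // width of one uint8 step
    const int32_t offset = static_cast<int32_t>((-min * 255.f) / (max - min)); // zero point

    const float inputs[] = { -1.0f, -0.5f, 0.0f, 0.25f, 1.0f };
    for (float x : inputs)
    {
        // Quantize: scale, shift by the zero point, clamp to the uint8 range.
        int32_t q = static_cast<int32_t>(std::round(x / scale)) + offset;
        q = std::min(std::max(q, 0), 255);

        // The "fake" part: the uint8 level is written back as float32, so the
        // tensor stays float but now carries 8-bit precision loss.
        std::printf("%+.2f -> level %3d -> %.1f\n", x, q, static_cast<float>(q));
    }
    return 0;
}

With the range above, -1.0f maps to level 0, 0.0f to level 127, and 1.0f to level 255; that is the shape of output the reference workload writes back into its float32 output tensor.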
Referenced in this file:

const float * GetInputTensorDataFloat(unsigned int idx, const PayloadType &data)

CPU Execution: Reference C++ kernels.

const QueueDescriptor m_Data
Definition: Workload.hpp:46

const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers

Copyright (c) 2020 ARM Limited.

void FakeQuantization(const float *inputData, float *outputData, uint32_t numElements, float min, float max)

#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:169

std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33

std::vector< ITensorHandle * > m_Inputs

float * GetOutputTensorDataFloat(unsigned int idx, const PayloadType &data)

unsigned int GetNumElements() const
Definition: Tensor.hpp:93
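
As an aside, the numeric_cast entry above shows the shape of ArmNN's header-only alternative to boost::numeric_cast: the enable_if constrains this overload to unsigned-to-unsigned conversions. A minimal sketch of how such an overload can be written follows; the range check and the exception type are assumptions made for illustration, not ArmNN's actual implementation.

// Sketch of an enable_if-gated unsigned-to-unsigned numeric_cast, modelled on
// the signature listed above. Range-check behaviour is assumed, not ArmNN's.
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <type_traits>

template <typename Dest, typename Source>
std::enable_if_t<std::is_unsigned<Source>::value && std::is_unsigned<Dest>::value, Dest>
numeric_cast(Source source)
{
    // Reject values that would silently wrap when narrowed.
    if (source > std::numeric_limits<Dest>::max())
    {
        throw std::out_of_range("numeric_cast: value out of range for destination type");
    }
    return static_cast<Dest>(source);
}

// Example: numeric_cast<uint8_t>(uint32_t{300}) throws instead of wrapping to 44.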