From 8d2ca734165a068478df7cffa46185680b05cd20 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Fri, 24 Feb 2023 10:28:19 +0000
Subject: Update Doxygen docu for 23.02

Signed-off-by: Nikhil Raj
Change-Id: Ie6c19a27d50fefab2796b2b5875374e81f5bf971
---
 ...quantization_float32_workload_8cpp_source.xhtml | 136 +++++++++++++++++++++
 1 file changed, 136 insertions(+)
 create mode 100644 23.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml

(limited to '23.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml')

diff --git a/23.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml b/23.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml
new file mode 100644
index 0000000000..615bd719ad
--- /dev/null
+++ b/23.02/_ref_fake_quantization_float32_workload_8cpp_source.xhtml
@@ -0,0 +1,136 @@

ArmNN: src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp Source File
RefFakeQuantizationFloat32Workload.cpp
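
This page shows the reference (CpuRef) implementation of the fake-quantization workload. FakeQuantization derives a uint8 scale and zero-point offset from the layer's m_Min/m_Max parameters, quantizes each float32 element to one of the 256 uint8 levels, and writes the resulting level back into the float32 output tensor.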
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefFakeQuantizationFloat32Workload.hpp"

#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

#include <armnn/utility/NumericCast.hpp>

namespace armnn
{

void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max)
{
    // Map the [min, max] float range onto the 256 levels of a uint8:
    // 'scale' is the float width of one level, 'offset' the zero point.
    float scale = (max - min) / 255.f;
    int32_t offset = armnn::numeric_cast<int32_t>((-min * 255.f) / (max - min));

    for (uint32_t i = 0; i < numElements; i++)
    {
        // Quantize to uint8 and write the level back as a float, so the
        // output reflects the rounding a real uint8 tensor would incur.
        outputData[i] = static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset));
    }
}

void RefFakeQuantizationFloat32Workload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefFakeQuantizationFloat32Workload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefFakeQuantizationFloat32Workload::Execute(std::vector<ITensorHandle*> inputs,
                                                 std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);

    const float* inputData = reinterpret_cast<const float*>(inputs[0]->Map());
    float* outputData = reinterpret_cast<float*>(outputs[0]->Map());

    FakeQuantization(inputData, outputData, inputInfo.GetNumElements(),
                     m_Data.m_Parameters.m_Min,
                     m_Data.m_Parameters.m_Max);
}

} // namespace armnn
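
For intuition, the sketch below reproduces the scale/offset arithmetic on a few sample values. QuantizeU8 is a hypothetical stand-in for armnn::Quantize<uint8_t>, whose definition is not part of this file; it is assumed here to round to the nearest level, add the zero-point offset, and clamp to the uint8 range.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for armnn::Quantize<uint8_t> (an assumption, not
// the actual ArmNN implementation): round to the nearest level, apply the
// zero-point offset, and clamp to [0, 255].
static uint8_t QuantizeU8(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::lround(value / scale)) + offset;
    return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
}

int main()
{
    // Same derivation as FakeQuantization above, for a [-1, 1] range:
    const float min = -1.f;
    const float max = 1.f;
    const float scale = (max - min) / 255.f;  // width of one uint8 step
    const int32_t offset =
        static_cast<int32_t>((-min * 255.f) / (max - min));  // zero point, here 127

    const float inputs[] = { -1.f, -0.5f, 0.f, 0.42f, 1.f };
    for (float v : inputs)
    {
        // As in the workload, store the quantization level itself as a float.
        float faked = static_cast<float>(QuantizeU8(v, scale, offset));
        std::printf("%+.2f -> level %.0f\n", v, faked);
    }
    return 0;
}

Note that, as in the workload, the result is the quantization level cast back to float, not the dequantized approximation; a consumer wanting the latter would compute (level - offset) * scale itself.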
--
cgit v1.2.1