From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 20.02/_ref_workload_utils_8hpp_source.xhtml | 138 ++++++++++++++++++++++++++++
 1 file changed, 138 insertions(+)
 create mode 100644 20.02/_ref_workload_utils_8hpp_source.xhtml

(limited to '20.02/_ref_workload_utils_8hpp_source.xhtml')

diff --git a/20.02/_ref_workload_utils_8hpp_source.xhtml b/20.02/_ref_workload_utils_8hpp_source.xhtml
new file mode 100644
index 0000000000..1cfe625b88
--- /dev/null
+++ b/20.02/_ref_workload_utils_8hpp_source.xhtml
@@ -0,0 +1,138 @@

ArmNN: src/backends/reference/workloads/RefWorkloadUtils.hpp Source File
RefWorkloadUtils.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "RefTensorHandle.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <Half.hpp>
#include <boost/polymorphic_cast.hpp>

#include <vector>

namespace armnn
{

////////////////////////////////////////////
/// float32 helpers
////////////////////////////////////////////

inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use RefTensorHandles for inputs and outputs,
    // so the downcast is safe: polymorphic_downcast verifies it with dynamic_cast
    // in debug builds and compiles to a plain static_cast in release builds.
    const RefTensorHandle* refTensorHandle =
        boost::polymorphic_downcast<const RefTensorHandle*>(tensorHandle);
    return refTensorHandle->GetTensorInfo();
}

// ITensorHandle::Map() exposes the tensor's backing memory; these helpers map
// input/output tensor idx of a workload payload as a typed pointer.
template <typename DataType, typename PayloadType>
const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
    return reinterpret_cast<const DataType*>(tensorHandle->Map());
}

template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

template <typename PayloadType>
const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<float>(idx, data);
}

template <typename PayloadType>
float* GetOutputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<float>(idx, data);
}

template <typename PayloadType>
const Half* GetInputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<Half>(idx, data);
}

template <typename PayloadType>
Half* GetOutputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<Half>(idx, data);
}

////////////////////////////////////////////
/// u8 helpers
////////////////////////////////////////////

// Dequantizes a whole tensor into a newly allocated float vector, applying the
// per-element armnn::Dequantize(value, scale, offset) from TypesUtils.cpp.
template<typename T>
std::vector<float> Dequantize(const T* quant, const TensorInfo& info)
{
    std::vector<float> ret(info.GetNumElements());
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        ret[i] = armnn::Dequantize(quant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
    return ret;
}

// Dequantizes a whole tensor into a caller-provided float buffer.
template<typename T>
inline void Dequantize(const T* inputData, float* outputData, const TensorInfo& info)
{
    for (unsigned int i = 0; i < info.GetNumElements(); i++)
    {
        outputData[i] = Dequantize<T>(inputData[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

// Quantizes a float buffer element-wise into a caller-provided uint8_t buffer.
inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

} //namespace armnn
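
For orientation, here is a hedged sketch of how a reference workload might combine these helpers in its Execute() step. ExampleEltwiseCopy and SomePayload are hypothetical stand-ins introduced for illustration only; in ArmNN proper, the payload is a queue descriptor whose m_Inputs/m_Outputs vectors hold ITensorHandle pointers, which is exactly what the templates above assume.

#include "RefWorkloadUtils.hpp"

// Hypothetical usage sketch: not part of this header.
// SomePayload stands in for an armnn queue descriptor exposing
// m_Inputs / m_Outputs vectors of ITensorHandle*.
template <typename SomePayload>
void ExampleEltwiseCopy(const SomePayload& data)
{
    // Tensor metadata (shape, quantization parameters) for input 0.
    const armnn::TensorInfo& inputInfo = armnn::GetTensorInfo(data.m_Inputs[0]);

    // Map the raw tensor storage as typed pointers.
    const float* inputData  = armnn::GetInputTensorDataFloat(0, data);
    float*       outputData = armnn::GetOutputTensorDataFloat(0, data);

    // Identity element-wise op, purely illustrative.
    for (unsigned int i = 0; i < inputInfo.GetNumElements(); ++i)
    {
        outputData[i] = inputData[i];
    }
}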
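The u8 helpers are meant to bracket float arithmetic: widen the quantized input with Dequantize, compute in float, then narrow the result with Quantize using the same TensorInfo. Below is a minimal hedged sketch; ExampleQuantizedPassThrough is a hypothetical name and the pass-through "arithmetic" is purely illustrative.

#include "RefWorkloadUtils.hpp"

#include <cstdint>

// Hypothetical usage sketch: not part of this header.
void ExampleQuantizedPassThrough(const uint8_t* in, uint8_t* out, const armnn::TensorInfo& info)
{
    // Widen every element to float using the tensor's scale/offset.
    std::vector<float> asFloat = armnn::Dequantize(in, info);

    // (Float arithmetic would happen here; this sketch changes nothing.)

    // Narrow back to uint8_t with the same quantization parameters.
    armnn::Quantize(out, asFloat.data(), info);
}

Using the same TensorInfo on both sides matters: the scale and offset define the float-to-u8 mapping, so mismatched parameters would silently corrupt the round trip.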
-- cgit v1.2.1