ArmNN 21.05
RefWorkloadUtils.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <reference/RefTensorHandle.hpp>

#include <BFloat16.hpp>
#include <Half.hpp>

namespace armnn
{

////////////////////////////////////////////
/// float32 helpers
////////////////////////////////////////////

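/// Returns the TensorInfo of a handle known to belong to the reference backend.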
inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use RefTensorHandles for inputs and outputs
    const RefTensorHandle* refTensorHandle =
        PolymorphicDowncast<const RefTensorHandle*>(tensorHandle);
    return refTensorHandle->GetTensorInfo();
}

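/// Maps the input tensor at the given index and returns its data as a typed, read-only pointer.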
template <typename DataType, typename PayloadType>
const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
    return reinterpret_cast<const DataType*>(tensorHandle->Map());
}

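/// Maps the output tensor at the given index and returns its data as a typed, writable pointer.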
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

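/// Overload that maps a tensor handle directly rather than going through a workload payload.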
template <typename DataType>
DataType* GetOutputTensorData(ITensorHandle* tensorHandle)
{
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

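// Typed convenience wrappers over the generic accessors above.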
template <typename PayloadType>
const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<float>(idx, data);
}

template <typename PayloadType>
float* GetOutputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<float>(idx, data);
}

template <typename PayloadType>
const Half* GetInputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<Half>(idx, data);
}

template <typename PayloadType>
Half* GetOutputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<Half>(idx, data);
}

template <typename PayloadType>
const BFloat16* GetInputTensorDataBFloat16(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<BFloat16>(idx, data);
}

template <typename PayloadType>
BFloat16* GetOutputTensorDataBFloat16(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<BFloat16>(idx, data);
}

////////////////////////////////////////////
/// u8 helpers
////////////////////////////////////////////

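/// Dequantizes an entire tensor into a newly allocated float vector.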
template<typename T>
std::vector<float> Dequantize(const T* quant, const TensorInfo& info)
{
    std::vector<float> ret(info.GetNumElements());
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        ret[i] = armnn::Dequantize(quant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
    return ret;
}

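/// Dequantizes a tensor into a caller-provided float buffer, avoiding an allocation.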
template<typename T>
inline void Dequantize(const T* inputData, float* outputData, const TensorInfo& info)
{
    for (unsigned int i = 0; i < info.GetNumElements(); i++)
    {
        outputData[i] = Dequantize<T>(inputData[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

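/// Quantizes a float buffer into a caller-provided uint8_t buffer using the tensor's scale and offset.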
inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

} // namespace armnn
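
For context, here is how a reference workload typically combines these helpers in its Execute() method. This is a minimal sketch, not code from the ArmNN sources: the workload class name, its payload, and the element-wise operation are assumptions used for illustration.

#include <cmath>

// Hypothetical reference workload: maps the first input and output as float
// buffers via the helpers above, then applies std::floor element by element.
// m_Data is assumed to be a queue-descriptor payload with m_Inputs/m_Outputs.
void RefFloorWorkload::Execute() const
{
    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);

    const float* inputData  = GetInputTensorDataFloat(0, m_Data);  // maps m_Inputs[0]
    float*       outputData = GetOutputTensorDataFloat(0, m_Data); // maps m_Outputs[0]

    for (unsigned int i = 0; i < inputInfo.GetNumElements(); ++i)
    {
        outputData[i] = std::floor(inputData[i]);
    }
}

For quantized (u8) tensors, a workload would instead dequantize the mapped input into floats, run the float computation, and quantize back into the mapped output, along these lines (again a sketch; 'data' stands for a workload payload):

const uint8_t* in  = GetInputTensorData<uint8_t>(0, data);
uint8_t*       out = GetOutputTensorData<uint8_t>(0, data);
const TensorInfo& inInfo  = GetTensorInfo(data.m_Inputs[0]);
const TensorInfo& outInfo = GetTensorInfo(data.m_Outputs[0]);

std::vector<float> floats = Dequantize(in, inInfo); // u8 -> float
// ... operate on 'floats' here ...
Quantize(out, floats.data(), outInfo);              // float -> u8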