ArmNN 23.08
RefWorkloadUtils.hpp
//
// Copyright © 2017-2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "RefTensorHandle.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <BFloat16.hpp>
#include <Half.hpp>

namespace armnn
{
/// Creates a profiling event that uses GetGuid() and GetName() from the calling class
#define ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID(label) \
ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuRef, \
                                              this->GetGuid(), \
                                              this->GetName() + "_" + label, \
                                              armnn::WallClockTimer())

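/// Example (illustrative sketch, not part of the original header): the macro
/// expects to be expanded inside a member function of a class that provides
/// GetGuid() and GetName(), such as a reference workload's Execute().
/// "RefFooWorkload" is a hypothetical name used only for illustration.
/// @code
/// void RefFooWorkload::Execute() const
/// {
///     ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("Execute");
///     // ... the kernel body runs inside the scoped profiling event ...
/// }
/// @endcode
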
////////////////////////////////////////////
/// float32 helpers
////////////////////////////////////////////

template <typename TensorHandleType = RefTensorHandle>
inline const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
{
    // We know that reference workloads use RefTensorHandles for inputs and outputs.
    const TensorHandleType* refTensorHandle =
        PolymorphicDowncast<const TensorHandleType*>(tensorHandle);
    return refTensorHandle->GetTensorInfo();
}

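/// Example (illustrative): recovering the TensorInfo of a workload input from
/// its ITensorHandle. Here "data" stands for a queue-descriptor-like payload
/// exposing an m_Inputs vector, as assumed by the helpers below.
/// @code
/// const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[0]);
/// unsigned int numElements = inputInfo.GetNumElements();
/// @endcode
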
template <typename DataType, typename PayloadType>
const DataType* GetInputTensorData(unsigned int idx, const PayloadType& data)
{
    const ITensorHandle* tensorHandle = data.m_Inputs[idx];
    return reinterpret_cast<const DataType*>(tensorHandle->Map());
}

template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

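/// Example (illustrative): a minimal element-wise copy kernel written against
/// these helpers; the payload type is assumed to expose m_Inputs/m_Outputs as
/// the reference queue descriptors do.
/// @code
/// const float* inputData  = GetInputTensorData<float>(0, data);
/// float*       outputData = GetOutputTensorData<float>(0, data);
/// const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[0]);
/// for (unsigned int i = 0; i < inputInfo.GetNumElements(); ++i)
/// {
///     outputData[i] = inputData[i];
/// }
/// @endcode
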
template <typename DataType>
DataType* GetOutputTensorData(ITensorHandle* tensorHandle)
{
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

template <typename PayloadType>
const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<float>(idx, data);
}

template <typename PayloadType>
float* GetOutputTensorDataFloat(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<float>(idx, data);
}

template <typename PayloadType>
const Half* GetInputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<Half>(idx, data);
}

template <typename PayloadType>
Half* GetOutputTensorDataHalf(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<Half>(idx, data);
}

template <typename PayloadType>
const BFloat16* GetInputTensorDataBFloat16(unsigned int idx, const PayloadType& data)
{
    return GetInputTensorData<BFloat16>(idx, data);
}

template <typename PayloadType>
BFloat16* GetOutputTensorDataBFloat16(unsigned int idx, const PayloadType& data)
{
    return GetOutputTensorData<BFloat16>(idx, data);
}

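/// Example (illustrative): the typed wrappers above simply fix the template
/// argument, so a Float16 workload can write:
/// @code
/// const Half* inputData  = GetInputTensorDataHalf(0, data);
/// Half*       outputData = GetOutputTensorDataHalf(0, data);
/// @endcode
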
////////////////////////////////////////////
/// u8 helpers
////////////////////////////////////////////

template<typename T>
std::vector<float> Dequantize(const T* quant, const TensorInfo& info)
{
    std::vector<float> ret(info.GetNumElements());
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        ret[i] = armnn::Dequantize(quant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
    return ret;
}

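/// Example (illustrative): dequantizing a quantized u8 input into a temporary
/// float buffer before running a float reference kernel:
/// @code
/// const uint8_t* quantData = GetInputTensorData<uint8_t>(0, data);
/// std::vector<float> dequantized = Dequantize(quantData, GetTensorInfo(data.m_Inputs[0]));
/// @endcode
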
template<typename T>
inline void Dequantize(const T* inputData, float* outputData, const TensorInfo& info)
{
    for (unsigned int i = 0; i < info.GetNumElements(); i++)
    {
        outputData[i] = Dequantize<T>(inputData[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

inline void Quantize(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

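/// Example (illustrative): quantizing float results back into a u8 output
/// tensor, completing the round trip with the Dequantize helpers above:
/// @code
/// const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
/// std::vector<float> results(outputInfo.GetNumElements());
/// // ... fill "results" with the float outputs of the kernel ...
/// Quantize(GetOutputTensorData<uint8_t>(0, data), results.data(), outputInfo);
/// @endcode
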
} // namespace armnn