ArmNN
 21.08
Decoders.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include "BaseIterator.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <armnn/utility/Assert.hpp>

15 namespace armnn
16 {
17 
18 namespace
19 {
20 
21 inline std::unique_ptr<Decoder<float>> MakeSigned32PerAxisDecoder(const TensorInfo& info, const void* data)
22 {
23  return std::make_unique<ScaledInt32PerAxisDecoder>(static_cast<const int32_t*>(data), info);
24 }
25 
26 inline std::unique_ptr<Decoder<float>> MakeSigned32Decoder(const TensorInfo& info, const void* data)
27 {
28  if(info.HasMultipleQuantizationScales())
29  {
30  // NOTE: If we have multiple quantization scales, we create a ScaledInt32PerAxisDecoder.
31  // This will be used to decode per-axis quantized convolution biases.
32  return MakeSigned32PerAxisDecoder(info, data);
33  }
34  else
35  {
36  if (info.GetQuantizationDim().has_value())
37  {
38  // NOTE: Even though we only have a single quantization scale, if the quantization
39  // dimension is set, the tensor has per-axis quantization and we need to create a
40  // ScaledInt32PerAxisDecoder
41  return MakeSigned32PerAxisDecoder(info, data);
42  }
43 
44  const float scale = info.GetQuantizationScale();
45  if (scale == 0.f)
46  {
47  // NOTE:: If no quantization scale is set, we create an Int32Decoder, which simply
48  // casts the int value to float. This will be used for any INT32 data other than
49  // convolution biases.
50  return std::make_unique<Int32Decoder>(static_cast<const int32_t*>(data));
51  }
52 
53  // NOTE: If we only have a single (non-zero) quantization scale and no quantization
54  // dimension is specified, we need to create a ScaledInt32Decoder. This will be used
55  // to decode per-tensor quantized convolution biases.
56  return std::make_unique<ScaledInt32Decoder>(static_cast<const int32_t*>(data), scale);
57  }
58 }
59 
60 } // anonymous namespace
61 
// Primary template: creates a Decoder<T> for the given tensor info and (optional)
// raw data pointer. Only the explicit specializations below (float, bool, int32_t)
// are defined; instantiating for any other T is a link error.
template<typename T>
inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr);
64 
65 template<>
66 inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data)
67 {
68  switch(info.GetDataType())
69  {
72  {
73  std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
74  return std::make_unique<QSymm8PerAxisDecoder>(static_cast<const int8_t*>(data), info);
75  }
77  case DataType::QAsymmS8:
78  {
79  return std::make_unique<QASymmS8Decoder>(
80  static_cast<const int8_t*>(data),
81  info.GetQuantizationScale(),
82  info.GetQuantizationOffset());
83  }
84  case DataType::QAsymmU8:
85  {
86  return std::make_unique<QASymm8Decoder>(
87  static_cast<const uint8_t*>(data),
88  info.GetQuantizationScale(),
89  info.GetQuantizationOffset());
90  }
91  case DataType::QSymmS16:
92  {
93  return std::make_unique<QSymm16Decoder>(
94  static_cast<const int16_t*>(data),
95  info.GetQuantizationScale(),
96  info.GetQuantizationOffset());
97  }
98  case DataType::BFloat16:
99  {
100  return std::make_unique<BFloat16Decoder>(static_cast<const BFloat16*>(data));
101  }
102  case DataType::Float16:
103  {
104  return std::make_unique<Float16Decoder>(static_cast<const Half*>(data));
105  }
106  case DataType::Float32:
107  {
108  return std::make_unique<Float32Decoder>(static_cast<const float*>(data));
109  }
110  case DataType::Signed32:
111  {
112  return MakeSigned32Decoder(info, data);
113  }
114  case DataType::QSymmS8:
115  {
116  if (info.HasPerAxisQuantization())
117  {
118  std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
119  return std::make_unique<QSymm8PerAxisDecoder>(static_cast<const int8_t*>(data), info);
120  }
121  else
122  {
123  return std::make_unique<QSymmS8Decoder>(
124  static_cast<const int8_t*>(data),
125  info.GetQuantizationScale(),
126  info.GetQuantizationOffset());
127  }
128  }
130  {
131  return std::make_unique<BooleanDecoder>(static_cast<const uint8_t*>(data));
132  }
133  default:
134  {
135  ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
136  break;
137  }
138  }
139  return nullptr;
140 }
141 
142 template<>
143 inline std::unique_ptr<Decoder<bool>> MakeDecoder(const TensorInfo& info, const void* data)
144 {
145  switch(info.GetDataType())
146  {
147  case DataType::Boolean:
148  {
149  return std::make_unique<BooleanDecoderBool>(static_cast<const uint8_t*>(data));
150  }
151  default:
152  {
153  ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
154  break;
155  }
156  }
157  return nullptr;
158 }
159 
160 template<>
161 inline std::unique_ptr<Decoder<int32_t>> MakeDecoder(const TensorInfo& info, const void* data)
162 {
163  switch(info.GetDataType())
164  {
165  case DataType::Signed32:
166  {
167  return std::make_unique<Int32ToInt32tDecoder>(static_cast<const int32_t*>(data));
168  }
169  default:
170  {
171  ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
172  break;
173  }
174  }
175  return nullptr;
176 }
177 
178 } //namespace armnn
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:448
std::pair< unsigned int, std::vector< float > > GetPerAxisParams(const armnn::TensorInfo &info)
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< Decoder< T > > MakeDecoder(const TensorInfo &info, const void *data=nullptr)
Definition: Decoders.hpp:66
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
DataType GetDataType() const
Definition: Tensor.hpp:198
half_float::half Half
Definition: Half.hpp:16