ArmNN 20.08
Types.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <memory>
#include <stdint.h>
#include "BackendId.hpp"
#include "Exceptions.hpp"
#include "Deprecated.hpp"

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

/// The lowest performance data capture interval we support is 10 milliseconds.
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;

/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};

enum class DataType
{
    Float16 = 0,
    Float32 = 1,
    QAsymmU8 = 2,
    Signed32 = 3,
    Boolean = 4,
    QSymmS16 = 5,
    QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
    QSymmS8 = 7,
    QAsymmS8 = 8,
    BFloat16 = 9,

    QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
    QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
};

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};

enum class ActivationFunction
{
    Sigmoid = 0,
    TanH = 1,
    Linear = 2,
    ReLu = 3,
    BoundedReLu = 4, ///< min(a, max(b, input))  ReLu1 & ReLu6.
    SoftReLu = 5,
    LeakyReLu = 6,
    Abs = 7,
    Sqrt = 8,
    Square = 9,
    Elu = 10,
    HardSwish = 11
};
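
The BoundedReLu entry above is documented only by its formula. As a standalone illustration (not part of this header), min(a, max(b, input)) with a = 6 and b = 0 reproduces ReLu6:

#include <algorithm>
#include <cassert>

// Hypothetical helper illustrating BoundedReLu's formula min(a, max(b, input)).
float BoundedReLu(float a, float b, float input)
{
    return std::min(a, std::max(b, input));
}

int main()
{
    assert(BoundedReLu(6.0f, 0.0f,  3.5f) == 3.5f); // passes through unchanged
    assert(BoundedReLu(6.0f, 0.0f, -1.0f) == 0.0f); // clipped below at b
    assert(BoundedReLu(6.0f, 0.0f,  9.0f) == 6.0f); // clipped above at a
}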

enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};

enum class ComparisonOperation
{
    Equal = 0,
    Greater = 1,
    GreaterOrEqual = 2,
    Less = 3,
    LessOrEqual = 4,
    NotEqual = 5
};

enum class UnaryOperation
{
    Abs = 0,
    Exp = 1,
    Sqrt = 2,
    Rsqrt = 3,
    Neg = 4
};

enum class PoolingAlgorithm
{
    Max = 0,
    Average = 1,
    L2 = 2
};

enum class ResizeMethod
{
    Bilinear = 0,
    NearestNeighbor = 1
};

enum class Dimensionality
{
    NotSpecified = 0,
    Specified = 1,
    Scalar = 2
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the values are ignored (they are
/// not even zeroes, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude = 1
};
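
To make the IgnoreValue/Exclude distinction concrete, consider one 2x2 Average pooling window at a tensor corner, where three of the four positions fall into the padding region. A minimal standalone sketch (not ArmNN code) of the divisor each method uses:

#include <iostream>

int main()
{
    const float sumOfRealValues = 7.0f; // padded positions contribute nothing to the sum
    const int   windowSize      = 4;    // 2x2 window, padding fields included
    const int   realValueCount  = 1;    // positions actually inside the tensor

    // PaddingMethod::IgnoreValue: padding fields count in the divisor.
    std::cout << sumOfRealValues / windowSize     << "\n"; // prints 1.75
    // PaddingMethod::Exclude: only real fields count in the divisor.
    std::cout << sumOfRealValues / realValueCount << "\n"; // prints 7
}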

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor = 0,
    Ceiling = 1
};

///
/// The ShapeInferenceMethod modifies how the output shapes are treated.
/// When ValidateOnly is selected, the output shapes are inferred from the input parameters of the layer
/// and any mismatch is reported.
/// When InferAndValidate is selected, two actions are performed: (1) infer output shape from inputs and
/// (2) validate the shapes as in ValidateOnly. This option has been added to work with tensors whose rank or
/// dimension sizes are not specified explicitly but can be calculated from the inputs.
///
enum class ShapeInferenceMethod
{
    /// Validate all output shapes
    ValidateOnly = 0,
    /// Infer missing output shapes and validate all output shapes
    InferAndValidate = 1
};
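
The difference between the two policies can be sketched with a hypothetical helper (ReconcileShape and its Shape type are illustrative only, not ArmNN API): ValidateOnly merely checks, while InferAndValidate may first fill in a shape that was left unspecified.

#include <armnn/Types.hpp>
#include <stdexcept>
#include <vector>

using Shape = std::vector<unsigned int>;

// Hypothetical helper, not ArmNN API: reconcile a user-specified output shape
// (empty when unspecified) with the shape inferred from the layer's inputs.
Shape ReconcileShape(const Shape& specified, const Shape& inferred,
                     armnn::ShapeInferenceMethod method)
{
    if (specified.empty() && method == armnn::ShapeInferenceMethod::InferAndValidate)
    {
        return inferred; // step (1): adopt the inferred shape, which then trivially validates
    }
    if (specified != inferred)
    {
        throw std::runtime_error("output shape mismatch"); // reported in both modes
    }
    return specified;
}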

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. For a 4-d tensor laid out in a memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, leads to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ].
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    ConstIterator end() const { return m_DimMappings.end(); }

    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
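
A short usage sketch of the exact example from the doc comment above: the NHWC-to-ArmNN mapping [ 0, 2, 3, 1 ] and the reversed output mapping [ 0, 3, 1, 2 ] are inverses of each other, which IsInverse confirms.

#include <armnn/Types.hpp>
#include <cassert>

int main()
{
    // Source (Batch, Height, Width, Channels) into ArmNN (Batch, Channels, Height, Width).
    armnn::PermutationVector toArmNN({ 0, 2, 3, 1 });
    // Reversed mapping for writing ArmNN outputs back to the NHWC destination.
    armnn::PermutationVector fromArmNN({ 0, 3, 1, 2 });

    assert(toArmNN.GetSize() == 4);
    assert(toArmNN[1] == 2);              // source Height maps to ArmNN dimension 2
    assert(toArmNN.IsInverse(fromArmNN)); // the two mappings undo each other
}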

namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
using LayerGuid = profiling::ProfilingGuid;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;

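For illustration, a callback matching this signature might simply report which layer produced the tensor. Only the type alias comes from this header; registering the callback (for example via IRuntime::RegisterDebugCallback elsewhere in the ArmNN API) is assumed usage, not shown here.

#include <armnn/Types.hpp>
#include <iostream>

armnn::DebugCallbackFunction MakePrintingCallback()
{
    return [](armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* /*tensorHandle*/)
    {
        // LayerGuid converts to uint64_t via ProfilingGuid's conversion operator.
        std::cout << "Debug layer input: producer guid " << guid
                  << ", output slot " << slotIndex << "\n";
    };
}
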
namespace profiling
{

static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

class ProfilingGuid
{
public:
    ProfilingGuid() : m_Guid(0) {}

    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};

/// Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
struct ProfilingDynamicGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

struct ProfilingStaticGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

} // namespace profiling

} // namespace armnn

namespace std
{
/// make ProfilingGuid hashable
template<>
struct hash<armnn::profiling::ProfilingGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

/// make ProfilingDynamicGuid hashable
template<>
struct hash<armnn::profiling::ProfilingDynamicGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};

/// make ProfilingStaticGuid hashable
template<>
struct hash<armnn::profiling::ProfilingStaticGuid>
{
    std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
    {
        return hash<uint64_t>()(uint64_t(guid));
    }
};
} // namespace std
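
These specializations exist so guids can key the standard unordered containers directly; a small usage sketch (only this header's types are assumed):

#include <armnn/Types.hpp>
#include <unordered_set>

int main()
{
    std::unordered_set<armnn::profiling::ProfilingGuid> seen;
    seen.insert(armnn::profiling::ProfilingGuid(42u));

    // The guid hashes as its underlying uint64_t value, so lookups just work.
    return seen.count(armnn::profiling::ProfilingGuid(42u)) == 1 ? 0 : 1;
}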