ArmNN
 20.11
Types.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <array>
8 #include <functional>
9 #include <memory>
10 #include <stdint.h>
11 #include "BackendId.hpp"
12 #include "Exceptions.hpp"
13 #include "Deprecated.hpp"
14 
15 namespace armnn
16 {
17 
/// Maximum tensor rank supported throughout ArmNN (see PermutationVector::ArrayType).
constexpr unsigned int MaxNumOfTensorDimensions = 5U;

/// The lowest performance data capture interval we support is 10 milliseconds (the value is 10000 microseconds).
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;
22 
/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};
31 
32 enum class DataType
33 {
34  Float16 = 0,
35  Float32 = 1,
36  QAsymmU8 = 2,
37  Signed32 = 3,
38  Boolean = 4,
39  QSymmS16 = 5,
40  QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
41  QSymmS8 = 7,
42  QAsymmS8 = 8,
43  BFloat16 = 9,
44  Signed64 = 10,
45 
46  QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
47  QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
48 };
49 
/// Memory layout of 4-d tensors: N = batch, C = channels, H = height, W = width.
enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};
// NOTE(review): the "enum class ActivationFunction" header line was lost in extraction;
// reconstructed from this file's own symbol index (ActivationFunction, Types.hpp:56).
enum class ActivationFunction
{
    Sigmoid     = 0,
    TanH        = 1,
    Linear      = 2,
    ReLu        = 3,
    BoundedReLu = 4, ///< min(a, max(b, input))  ReLu1 & ReLu6.
    SoftReLu    = 5,
    LeakyReLu   = 6,
    Abs         = 7,
    Sqrt        = 8,
    Square      = 9,
    Elu         = 10,
    HardSwish   = 11
};
71 
// NOTE(review): enum header lost in extraction; reconstructed from the symbol index
// (ArgMinMaxFunction, Types.hpp:72).
enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};
77 
// NOTE(review): enum header lost in extraction; reconstructed from the symbol index
// (ComparisonOperation, Types.hpp:78).
enum class ComparisonOperation
{
    Equal          = 0,
    Greater        = 1,
    GreaterOrEqual = 2,
    Less           = 3,
    LessOrEqual    = 4,
    NotEqual       = 5
};
87 
// NOTE(review): enum header lost in extraction; reconstructed from the symbol index
// (LogicalBinaryOperation, Types.hpp:88).
enum class LogicalBinaryOperation
{
    LogicalAnd = 0,
    LogicalOr  = 1
};
93 
/// Element-wise unary operations dispatched through the ElementwiseUnary layer.
enum class UnaryOperation
{
    Abs        = 0,
    Exp        = 1,
    Sqrt       = 2,
    Rsqrt      = 3,
    Neg        = 4,
    LogicalNot = 5
};
103 
// NOTE(review): enum header lost in extraction; reconstructed from the symbol index
// (PoolingAlgorithm, Types.hpp:104).
enum class PoolingAlgorithm
{
    Max     = 0,
    Average = 1,
    L2      = 2
};
110 
/// Interpolation methods available to the Resize layer.
enum class ResizeMethod
{
    Bilinear        = 0,
    NearestNeighbor = 1
};
116 
/// Whether a tensor's dimensionality is fully specified, unspecified, or a scalar.
enum class Dimensionality
{
    NotSpecified = 0,
    Specified    = 1,
    Scalar       = 2
};
123 
///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the values are ignored (they are
/// not even zeroes, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude = 1
};
140 
// NOTE(review): enum header lost in extraction; reconstructed from the symbol index
// (NormalizationAlgorithmChannel, Types.hpp:141).
enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};
146 
// NOTE(review): enum header lost in extraction; reconstructed from the symbol index
// (NormalizationAlgorithmMethod, Types.hpp:147).
enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};
154 
// NOTE(review): enum header lost in extraction; reconstructed from the symbol index
// (OutputShapeRounding, Types.hpp:155).
enum class OutputShapeRounding
{
    Floor   = 0,
    Ceiling = 1
};
160 
///
/// The ShapeInferenceMethod modify how the output shapes are treated.
/// When ValidateOnly is selected, the output shapes are inferred from the input parameters of the layer
/// and any mismatch is reported.
/// When InferAndValidate is selected 2 actions must be performed: (1)infer output shape from inputs and (2)validate the
/// shapes as in ValidateOnly. This option has been added to work with tensors which rank or dimension sizes are not
/// specified explicitly, however this information can be calculated from the inputs.
///
// NOTE(review): the enum header line was lost in extraction; reconstructed from the
// symbol index (ShapeInferenceMethod, Types.hpp:169).
enum class ShapeInferenceMethod
{
    /// Validate all output shapes
    ValidateOnly = 0,
    /// Infer missing output shapes and validate all output shapes
    InferAndValidate = 1
};
176 
177 /// Each backend should implement an IBackend.
178 class IBackend
179 {
180 protected:
181  IBackend() {}
182  virtual ~IBackend() {}
183 
184 public:
185  virtual const BackendId& GetId() const = 0;
186 };
187 
188 using IBackendSharedPtr = std::shared_ptr<IBackend>;
189 using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;
190 
191 /// Device specific knowledge to be passed to the optimizer.
193 {
194 protected:
196  virtual ~IDeviceSpec() {}
197 public:
198  virtual const BackendIdSet& GetSupportedBackends() const = 0;
199 };
200 
/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;
203 
205 {
206 public:
207  using ValueType = unsigned int;
208  using SizeType = unsigned int;
209  using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
210  using ConstIterator = typename ArrayType::const_iterator;
211 
212  /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
213  /// when source and target potentially have different memory layouts.
214  ///
215  /// E.g. For a 4-d tensor laid out in a memory with the format (Batch Element, Height, Width, Channels),
216  /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
217  /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
218  /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
219  /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
220  /// [ 0, 2, 3, 1 ].
221  ///
222  /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
223  /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
224  /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents:
225  /// [ 0, 3, 1, 2 ].
226  ///
227  PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);
228 
229  PermutationVector(std::initializer_list<ValueType> dimMappings);
230 
231  ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }
232 
233  SizeType GetSize() const { return m_NumDimMappings; }
234 
235  ConstIterator begin() const { return m_DimMappings.begin(); }
236  ConstIterator end() const { return m_DimMappings.end(); }
237 
238  bool IsEqual(const PermutationVector& other) const
239  {
240  if (m_NumDimMappings != other.m_NumDimMappings) return false;
241  for (unsigned int i = 0; i < m_NumDimMappings; ++i)
242  {
243  if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
244  }
245  return true;
246  }
247 
248  bool IsInverse(const PermutationVector& other) const
249  {
250  bool isInverse = (GetSize() == other.GetSize());
251  for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
252  {
253  isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
254  }
255  return isInverse;
256  }
257 
258 private:
259  ArrayType m_DimMappings;
260  /// Number of valid entries in @ref m_DimMappings
261  SizeType m_NumDimMappings;
262 };
263 
namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
// NOTE(review): the alias line itself was lost in extraction; reconstructed as an alias of
// profiling::ProfilingGuid, which the preceding forward declaration exists to support —
// confirm against the upstream header.
using LayerGuid = profiling::ProfilingGuid;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
276 
277 
278 namespace profiling
279 {
280 
/// Guids at or above this value are statically defined; values below are generated at runtime.
static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;

/// A 64-bit profiling identifier with value semantics and full ordering,
/// implicitly convertible to and from uint64_t.
// NOTE(review): the "class ProfilingGuid" header line was lost in extraction;
// reconstructed from the symbol index (ProfilingGuid members, Types.hpp:283-323).
class ProfilingGuid
{
public:
    ProfilingGuid() : m_Guid(0) {}

    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};

/// Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
// NOTE(review): the two struct definitions below were lost in extraction; reconstructed as
// thin ProfilingGuid subclasses with inherited constructors, which is what the std::hash
// specializations at the end of this file require.
struct ProfilingDynamicGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};

struct ProfilingStaticGuid : public ProfilingGuid
{
    using ProfilingGuid::ProfilingGuid;
};
336 
337 } // namespace profiling
338 
339 } // namespace armnn
340 
341 
342 namespace std
343 {
344 /// make ProfilingGuid hashable
345 template<>
346 struct hash<armnn::profiling::ProfilingGuid>
347 {
348  std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
349  {
350  return hash<uint64_t>()(uint64_t(guid));
351  }
352 };
353 
354 /// make ProfilingDynamicGuid hashable
355 template<>
356 struct hash<armnn::profiling::ProfilingDynamicGuid>
357 {
358  std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
359  {
360  return hash<uint64_t>()(uint64_t(guid));
361  }
362 };
363 
364 /// make ProfilingStaticGuid hashable
365 template<>
366 struct hash<armnn::profiling::ProfilingStaticGuid>
367 {
368  std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
369  {
370  return hash<uint64_t>()(uint64_t(guid));
371  }
372 };
373 } // namespace std
bool operator<(const ProfilingGuid &other) const
Definition: Types.hpp:302
unsigned int ValueType
Definition: Types.hpp:207
bool operator>=(const ProfilingGuid &other) const
Definition: Types.hpp:317
Dimensionality
Definition: Types.hpp:117
DataLayout
Definition: Types.hpp:50
virtual ~IBackend()
Definition: Types.hpp:182
bool operator==(const ProfilingGuid &other) const
Definition: Types.hpp:292
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
std::size_t operator()(armnn::profiling::ProfilingGuid const &guid) const noexcept
Definition: Types.hpp:348
typename ArrayType::const_iterator ConstIterator
Definition: Types.hpp:210
Each backend should implement an IBackend.
Definition: Types.hpp:178
Strongly typed guids to distinguish between those generated at runtime, and those that are statically...
Definition: Types.hpp:327
The padding fields don't count and are ignored.
NormalizationAlgorithmChannel
Definition: Types.hpp:141
Copyright (c) 2020 ARM Limited.
SizeType GetSize() const
Definition: Types.hpp:233
PoolingAlgorithm
Definition: Types.hpp:104
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition: Types.hpp:275
std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const &guid) const noexcept
Definition: Types.hpp:358
LogicalBinaryOperation
Definition: Types.hpp:88
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:202
PaddingMethod
The padding method modifies the output of pooling layers.
Definition: Types.hpp:133
std::shared_ptr< IBackend > IBackendSharedPtr
Definition: Types.hpp:188
ComparisonOperation
Definition: Types.hpp:78
bool operator<=(const ProfilingGuid &other) const
Definition: Types.hpp:307
DataType
Definition: Types.hpp:32
#define ARMNN_DEPRECATED_ENUM_MSG(message)
Definition: Deprecated.hpp:50
std::size_t operator()(armnn::profiling::ProfilingStaticGuid const &guid) const noexcept
Definition: Types.hpp:368
std::array< ValueType, MaxNumOfTensorDimensions > ArrayType
Definition: Types.hpp:209
Validate all output shapes.
Status
enumeration
Definition: Types.hpp:26
virtual ~IDeviceSpec()
Definition: Types.hpp:196
Device specific knowledge to be passed to the optimizer.
Definition: Types.hpp:192
constexpr unsigned int LOWEST_CAPTURE_PERIOD
The lowest performance data capture interval we support is 10 milliseconds.
Definition: Types.hpp:21
std::unique_ptr< IBackend, void(*)(IBackend *backend)> IBackendUniquePtr
Definition: Types.hpp:189
min(a, max(b, input)) ReLu1 & ReLu6.
unsigned int SizeType
Definition: Types.hpp:208
ValueType operator[](SizeType i) const
Definition: Types.hpp:231
OutputShapeRounding
Definition: Types.hpp:155
bool IsEqual(const PermutationVector &other) const
Definition: Types.hpp:238
The padding fields count, but are ignored.
ProfilingGuid(uint64_t guid)
Definition: Types.hpp:288
bool operator!=(const ProfilingGuid &other) const
Definition: Types.hpp:297
Jarret 2009: Local Contrast Normalization.
ArgMinMaxFunction
Definition: Types.hpp:72
ConstIterator begin() const
Definition: Types.hpp:235
ResizeMethod
Definition: Types.hpp:111
ConstIterator end() const
Definition: Types.hpp:236
UnaryOperation
Definition: Types.hpp:94
Infer missing output shapes and validate all output shapes.
Krichevsky 2012: Local Brightness Normalization.
bool IsInverse(const PermutationVector &other) const
Definition: Types.hpp:248
NormalizationAlgorithmMethod
Definition: Types.hpp:147
bool operator>(const ProfilingGuid &other) const
Definition: Types.hpp:312
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:169
constexpr unsigned int MaxNumOfTensorDimensions
Definition: Types.hpp:18
ActivationFunction
Definition: Types.hpp:56