ArmNN
 20.02
Types.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include <array>
8 #include <functional>
9 #include <memory>
10 #include <stdint.h>
11 #include "BackendId.hpp"
12 #include "Exceptions.hpp"
13 #include "Deprecated.hpp"
14 
15 namespace armnn
16 {
17 
/// Maximum tensor rank supported by ArmNN.
constexpr unsigned int MaxNumOfTensorDimensions = 5U;

/// The lowest performance data capture interval we support is 10 milliseconds.
/// NOTE(review): the value is 10000, so the unit is presumably microseconds — confirm against the profiling code.
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;
22 
/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};
31 
32 enum class DataType
33 {
34  Float16 = 0,
35  Float32 = 1,
36  QAsymmU8 = 2,
37  Signed32 = 3,
38  Boolean = 4,
39  QSymmS16 = 5,
40  QuantizedSymm8PerAxis ARMNN_DEPRECATED_ENUM_MSG("Per Axis property inferred by number of scales in TensorInfo") = 6,
41  QSymmS8 = 7,
42  QAsymmS8 = 8,
43  BFloat16 = 9,
44 
45  QuantisedAsymm8 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QAsymmU8 instead.") = QAsymmU8,
46  QuantisedSymm16 ARMNN_DEPRECATED_ENUM_MSG("Use DataType::QSymmS16 instead.") = QSymmS16
47 };
48 
/// Memory layout of 4-d tensors: batch/channels/height/width ordering.
enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};
54 
/// Activation functions usable by the Activation layer.
enum class ActivationFunction
{
    Sigmoid     = 0,
    TanH        = 1,
    Linear      = 2,
    ReLu        = 3,
    BoundedReLu = 4, ///< min(a, max(b, input))  ReLu1 & ReLu6.
    SoftReLu    = 5,
    LeakyReLu   = 6,
    Abs         = 7,
    Sqrt        = 8,
    Square      = 9,
    Elu         = 10,
    HardSwish   = 11
};
70 
/// Selects whether ArgMinMax computes the index of the minimum or the maximum.
enum class ArgMinMaxFunction
{
    Min = 0,
    Max = 1
};
76 
/// Element-wise comparison operations supported by the Comparison layer.
enum class ComparisonOperation
{
    Equal          = 0,
    Greater        = 1,
    GreaterOrEqual = 2,
    Less           = 3,
    LessOrEqual    = 4,
    NotEqual       = 5
};
86 
/// Element-wise unary operations supported by the ElementwiseUnary layer.
enum class UnaryOperation
{
    Abs   = 0,
    Exp   = 1,
    Sqrt  = 2,
    Rsqrt = 3,
    Neg   = 4
};
95 
/// Pooling algorithms usable by the Pooling2d layer.
enum class PoolingAlgorithm
{
    Max     = 0,
    Average = 1,
    L2      = 2
};
102 
/// Interpolation methods usable by the Resize layer.
enum class ResizeMethod
{
    Bilinear        = 0,
    NearestNeighbor = 1
};
108 
///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the values are ignored (they are
/// not even zeroes, which would make a difference for max pooling
/// a tensor with negative values). The difference between
/// IgnoreValue and Exclude is that the former counts the padding
/// fields in the divisor of Average and L2 pooling, while
/// Exclude does not.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude     = 1
};
125 
/// Selects whether normalization is applied across or within channels.
enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};
131 
/// Normalization methods usable by the Normalization layer.
enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast   = 1
};
139 
/// Rounding mode used when deriving output shapes (e.g. for pooling).
enum class OutputShapeRounding
{
    Floor   = 0,
    Ceiling = 1
};
145 
146 /// Each backend should implement an IBackend.
147 class IBackend
148 {
149 protected:
150  IBackend() {}
151  virtual ~IBackend() {}
152 
153 public:
154  virtual const BackendId& GetId() const = 0;
155 };
156 
157 using IBackendSharedPtr = std::shared_ptr<IBackend>;
158 using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;
159 
160 /// Device specific knowledge to be passed to the optimizer.
162 {
163 protected:
165  virtual ~IDeviceSpec() {}
166 public:
167  virtual const BackendIdSet& GetSupportedBackends() const = 0;
168 };
169 
/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;
172 
174 {
175 public:
176  using ValueType = unsigned int;
177  using SizeType = unsigned int;
178  using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
179  using ConstIterator = typename ArrayType::const_iterator;
180 
181  /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
182  /// when source and target potentially have different memory layouts.
183  ///
184  /// E.g. For a 4-d tensor laid out in a memory with the format (Batch Element, Height, Width, Channels),
185  /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
186  /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
187  /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
188  /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
189  /// [ 0, 2, 3, 1 ].
190  ///
191  /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
192  /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
193  /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, lead to the following @ref m_DimMappings contents:
194  /// [ 0, 3, 1, 2 ].
195  ///
196  PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);
197 
198  PermutationVector(std::initializer_list<ValueType> dimMappings);
199 
200  ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }
201 
202  SizeType GetSize() const { return m_NumDimMappings; }
203 
204  ConstIterator begin() const { return m_DimMappings.begin(); }
205  ConstIterator end() const { return m_DimMappings.end(); }
206 
207  bool IsEqual(const PermutationVector& other) const
208  {
209  if (m_NumDimMappings != other.m_NumDimMappings) return false;
210  for (unsigned int i = 0; i < m_NumDimMappings; ++i)
211  {
212  if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
213  }
214  return true;
215  }
216 
217  bool IsInverse(const PermutationVector& other) const
218  {
219  bool isInverse = (GetSize() == other.GetSize());
220  for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
221  {
222  isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
223  }
224  return isInverse;
225  }
226 
227 private:
228  ArrayType m_DimMappings;
229  /// Number of valid entries in @ref m_DimMappings
230  SizeType m_NumDimMappings;
231 };
232 
namespace profiling { class ProfilingGuid; }

/// Define LayerGuid type.
using LayerGuid = profiling::ProfilingGuid;
238 class ITensorHandle;
239 
240 /// Define the type of callback for the Debug layer to call
241 /// @param guid - guid of layer connected to the input of the Debug layer
242 /// @param slotIndex - index of the output slot connected to the input of the Debug layer
243 /// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
244 using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
245 
246 
247 namespace profiling
248 {
249 
/// Guids with the top bit set are statically defined; values below this are generated at runtime.
static constexpr uint64_t MIN_STATIC_GUID = 1llu << 63;
251 
/// Wraps a raw 64-bit profiling guid and provides the full set of value comparisons.
/// The constructor is intentionally non-explicit so a raw uint64_t converts implicitly.
class ProfilingGuid
{
public:
    ProfilingGuid(uint64_t guid) : m_Guid(guid) {}

    /// Implicit conversion back to the raw 64-bit value.
    operator uint64_t() const { return m_Guid; }

    bool operator==(const ProfilingGuid& other) const
    {
        return m_Guid == other.m_Guid;
    }

    bool operator!=(const ProfilingGuid& other) const
    {
        return m_Guid != other.m_Guid;
    }

    bool operator<(const ProfilingGuid& other) const
    {
        return m_Guid < other.m_Guid;
    }

    bool operator<=(const ProfilingGuid& other) const
    {
        return m_Guid <= other.m_Guid;
    }

    bool operator>(const ProfilingGuid& other) const
    {
        return m_Guid > other.m_Guid;
    }

    bool operator>=(const ProfilingGuid& other) const
    {
        return m_Guid >= other.m_Guid;
    }

protected:
    uint64_t m_Guid;
};
292 
293 /// Strongly typed guids to distinguish between those generated at runtime, and those that are statically defined.
295 {
297 };
298 
300 {
302 };
303 
304 } // namespace profiling
305 
306 } // namespace armnn
307 
308 
309 namespace std
310 {
311 /// make ProfilingGuid hashable
312 template<>
313 struct hash<armnn::profiling::ProfilingGuid>
314 {
315  std::size_t operator()(armnn::profiling::ProfilingGuid const& guid) const noexcept
316  {
317  return hash<uint64_t>()(uint64_t(guid));
318  }
319 };
320 
321 /// make ProfilingDynamicGuid hashable
322 template<>
323 struct hash<armnn::profiling::ProfilingDynamicGuid>
324 {
325  std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const& guid) const noexcept
326  {
327  return hash<uint64_t>()(uint64_t(guid));
328  }
329 };
330 
331 /// make ProfilingStaticGuid hashable
332 template<>
333 struct hash<armnn::profiling::ProfilingStaticGuid>
334 {
335  std::size_t operator()(armnn::profiling::ProfilingStaticGuid const& guid) const noexcept
336  {
337  return hash<uint64_t>()(uint64_t(guid));
338  }
339 };
340 } // namespace std
bool operator<(const ProfilingGuid &other) const
Definition: Types.hpp:269
unsigned int ValueType
Definition: Types.hpp:176
bool operator>=(const ProfilingGuid &other) const
Definition: Types.hpp:284
DataLayout
Definition: Types.hpp:49
virtual ~IBackend()
Definition: Types.hpp:151
bool operator==(const ProfilingGuid &other) const
Definition: Types.hpp:259
std::unordered_set< BackendId > BackendIdSet
Definition: BackendId.hpp:191
std::size_t operator()(armnn::profiling::ProfilingGuid const &guid) const noexcept
Definition: Types.hpp:315
typename ArrayType::const_iterator ConstIterator
Definition: Types.hpp:179
Each backend should implement an IBackend.
Definition: Types.hpp:147
Strongly typed guids to distinguish between those generated at runtime, and those that are statically...
Definition: Types.hpp:294
The padding fields don't count and are ignored.
NormalizationAlgorithmChannel
Definition: Types.hpp:126
Copyright (c) 2020 ARM Limited.
SizeType GetSize() const
Definition: Types.hpp:202
PoolingAlgorithm
Definition: Types.hpp:96
std::function< void(LayerGuid guid, unsigned int slotIndex, ITensorHandle *tensorHandle)> DebugCallbackFunction
Define the type of callback for the Debug layer to call.
Definition: Types.hpp:244
std::size_t operator()(armnn::profiling::ProfilingDynamicGuid const &guid) const noexcept
Definition: Types.hpp:325
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:171
PaddingMethod
The padding method modifies the output of pooling layers.
Definition: Types.hpp:118
std::shared_ptr< IBackend > IBackendSharedPtr
Definition: Types.hpp:157
ComparisonOperation
Definition: Types.hpp:77
bool operator<=(const ProfilingGuid &other) const
Definition: Types.hpp:274
DataType
Definition: Types.hpp:32
#define ARMNN_DEPRECATED_ENUM_MSG(message)
Definition: Deprecated.hpp:50
std::size_t operator()(armnn::profiling::ProfilingStaticGuid const &guid) const noexcept
Definition: Types.hpp:335
std::array< ValueType, MaxNumOfTensorDimensions > ArrayType
Definition: Types.hpp:178
Status
enumeration
Definition: Types.hpp:26
virtual ~IDeviceSpec()
Definition: Types.hpp:165
Device specific knowledge to be passed to the optimizer.
Definition: Types.hpp:161
constexpr unsigned int LOWEST_CAPTURE_PERIOD
The lowest performance data capture interval we support is 10 milliseconds.
Definition: Types.hpp:21
std::unique_ptr< IBackend, void(*)(IBackend *backend)> IBackendUniquePtr
Definition: Types.hpp:158
min(a, max(b, input)) ReLu1 & ReLu6.
unsigned int SizeType
Definition: Types.hpp:177
ValueType operator[](SizeType i) const
Definition: Types.hpp:200
OutputShapeRounding
Definition: Types.hpp:140
bool IsEqual(const PermutationVector &other) const
Definition: Types.hpp:207
The padding fields count, but are ignored.
ProfilingGuid(uint64_t guid)
Definition: Types.hpp:255
bool operator!=(const ProfilingGuid &other) const
Definition: Types.hpp:264
Jarret 2009: Local Contrast Normalization.
ArgMinMaxFunction
Definition: Types.hpp:71
ConstIterator begin() const
Definition: Types.hpp:204
ResizeMethod
Definition: Types.hpp:103
ConstIterator end() const
Definition: Types.hpp:205
UnaryOperation
Definition: Types.hpp:87
Krichevsky 2012: Local Brightness Normalization.
bool IsInverse(const PermutationVector &other) const
Definition: Types.hpp:217
NormalizationAlgorithmMethod
Definition: Types.hpp:132
bool operator>(const ProfilingGuid &other) const
Definition: Types.hpp:279
constexpr unsigned int MaxNumOfTensorDimensions
Definition: Types.hpp:18
ActivationFunction
Definition: Types.hpp:55