ArmNN 21.11
Descriptors.hpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 #include <iostream>
13 #include <sstream>
14 
15 #include "Tensor.hpp"
16 #include "Types.hpp"
17 
18 namespace armnn
19 {
20 
21 /// Base class for all descriptors.
22 struct BaseDescriptor {};
23 
24 /// An ActivationDescriptor for the ActivationLayer.
25 struct ActivationDescriptor : BaseDescriptor
26 {
27  ActivationDescriptor()
28  : m_Function(ActivationFunction::Sigmoid)
29  , m_A(0)
30  , m_B(0)
31  {}
32 
33  ActivationDescriptor(armnn::ActivationFunction activation,
34  float a = 0,
35  float b = 0)
36  : m_Function(activation)
37  , m_A(a)
38  , m_B(b)
39  {}
40 
41  bool operator ==(const ActivationDescriptor &rhs) const
42  {
43  return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
44  }
45 
46  /// @brief The activation function to use
47  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
48  ActivationFunction m_Function;
49  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
50  float m_A;
51  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
52  float m_B;
53 };
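As a usage sketch added for illustration (not part of the original header), a ReLU6-style activation corresponds to BoundedReLu with m_A = 6 and m_B = 0:

    // Clamp activations to the range [0, 6] (ReLU6).
    armnn::ActivationDescriptor reluDesc(armnn::ActivationFunction::BoundedReLu,
                                         6.0f,   // m_A: upper bound
                                         0.0f);  // m_B: lower bound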
54 
55 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
56 struct ArgMinMaxDescriptor : BaseDescriptor
57 {
58  ArgMinMaxDescriptor()
59  : m_Function(ArgMinMaxFunction::Min)
60  , m_Axis(-1)
61  , m_Output_Type(armnn::DataType::Signed32)
62  {}
63 
64  bool operator ==(const ArgMinMaxDescriptor &rhs) const
65  {
66  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
67  }
68 
69  /// Specify if the function is to find Min or Max.
70  ArgMinMaxFunction m_Function;
71  /// Axis to reduce across the input tensor.
72  int m_Axis;
73  /// Deprecated and will be removed in future release.
74  armnn::DataType m_Output_Type;
75 };
76 
77 /// A ComparisonDescriptor for the ComparisonLayer
78 struct ComparisonDescriptor : BaseDescriptor
79 {
80  ComparisonDescriptor()
81  : ComparisonDescriptor(ComparisonOperation::Equal)
82  {}
83 
84  ComparisonDescriptor(ComparisonOperation operation)
85  : m_Operation(operation)
86  {}
87 
88  bool operator ==(const ComparisonDescriptor &rhs) const
89  {
90  return m_Operation == rhs.m_Operation;
91  }
92 
93  /// Specifies the comparison operation to execute
94  ComparisonOperation m_Operation;
95 };
96 
97 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
98 struct ElementwiseUnaryDescriptor : BaseDescriptor
99 {
100  ElementwiseUnaryDescriptor()
101  : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
102  {}
103 
104  ElementwiseUnaryDescriptor(UnaryOperation operation)
105  : m_Operation(operation)
106  {}
107 
108  bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
109  {
110  return m_Operation == rhs.m_Operation;
111  }
112 
113  /// Specifies the elementwiseUnary operation to execute
114  UnaryOperation m_Operation;
115 };
116 
117 /// A PermuteDescriptor for the PermuteLayer.
118 struct PermuteDescriptor : BaseDescriptor
119 {
120  PermuteDescriptor()
121  : m_DimMappings{}
122  {}
123 
124  PermuteDescriptor(const PermutationVector& dimMappings)
125  : m_DimMappings(dimMappings)
126  {}
127 
128  bool operator ==(const PermuteDescriptor &rhs) const
129  {
130  return m_DimMappings.IsEqual(rhs.m_DimMappings);
131  }
132 
133  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
134  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
135  PermutationVector m_DimMappings;
136 };
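As an illustrative sketch (not part of the original header), the mapping quoted above rearranges an NCHW tensor into NHWC: the value stored at each source index gives the destination index of that dimension, so C (source index 1) moves to position 3:

    // NCHW -> NHWC: N stays at 0, C moves to 3, H moves to 1, W moves to 2.
    armnn::PermutationVector nchwToNhwc({ 0U, 3U, 1U, 2U });
    armnn::PermuteDescriptor permuteDesc(nchwToNhwc);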
137 
138 /// A SoftmaxDescriptor for the SoftmaxLayer.
139 struct SoftmaxDescriptor : BaseDescriptor
140 {
141  SoftmaxDescriptor()
142  : m_Beta(1.0f)
143  , m_Axis(-1)
144  {}
145 
146  bool operator ==(const SoftmaxDescriptor& rhs) const
147  {
148  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
149  }
150 
151  /// Exponentiation value.
152  float m_Beta;
153  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
154  int m_Axis;
155 };
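As an illustrative sketch (not part of the original header), the defaults already describe a standard softmax over the last dimension; raising m_Beta sharpens the resulting distribution:

    armnn::SoftmaxDescriptor softmaxDesc;
    softmaxDesc.m_Beta = 2.0f;  // exponentiation value
    softmaxDesc.m_Axis = -1;    // operate on the last dimension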
156 
157 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer
158 using LogSoftmaxDescriptor = SoftmaxDescriptor;
159 
160 /// @brief An OriginsDescriptor for the ConcatLayer.
161 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
162 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
163 struct OriginsDescriptor : BaseDescriptor
164 {
165  OriginsDescriptor();
166  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
167  OriginsDescriptor(const OriginsDescriptor& other);
168  OriginsDescriptor(OriginsDescriptor&& other);
169 
170  ~OriginsDescriptor();
171 
172  OriginsDescriptor& operator=(OriginsDescriptor rhs);
173 
174  bool operator ==(const OriginsDescriptor& rhs) const;
175 
176  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
177  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
178  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
179  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
180  /// Get the number of views.
181  uint32_t GetNumViews() const;
182  /// Get the number of dimensions.
183  uint32_t GetNumDimensions() const;
184  /// Return the view origin at the int value idx.
185  const uint32_t* GetViewOrigin(uint32_t idx) const;
186  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
187  /// The number of views must match number of elements in the new ordering array.
188  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
189  /// Swap the ViewsDescriptor values first and second.
190  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
191  /// Set the concatenation axis value.
192  void SetConcatAxis(unsigned int concatAxis);
193  /// Get the concatenation axis value.
194  unsigned int GetConcatAxis() const;
195 
196 private:
197  unsigned int m_ConcatAxis;
198  uint32_t m_NumViews;
199  uint32_t m_NumDimensions;
200  uint32_t** m_ViewOrigins;
201 };
202 
203 /// @brief A ViewsDescriptor for the SplitterLayer.
204 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
205 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
206 struct ViewsDescriptor : BaseDescriptor
207 {
208  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
209  ViewsDescriptor(const ViewsDescriptor& other);
210  ViewsDescriptor();
211  ViewsDescriptor(ViewsDescriptor&& other);
212 
213  ~ViewsDescriptor();
214 
215  ViewsDescriptor& operator=(ViewsDescriptor rhs);
216 
217  bool operator ==(const ViewsDescriptor& rhs) const;
218 
219  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
220  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
221  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
222  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
223  /// @brief Set the size of the views. The arguments are: view, dimension, value.
224  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
225  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
226  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
227 
228  /// Get the number of views.
229  uint32_t GetNumViews() const;
230  /// Get the number of dimensions.
231  uint32_t GetNumDimensions() const;
232  /// Get the view origin at the int value idx.
233  const uint32_t* GetViewOrigin(uint32_t idx) const;
234  /// Get the view sizes at the int value idx.
235  const uint32_t* GetViewSizes(uint32_t idx) const;
236  /// Get the View Origins
237  const OriginsDescriptor& GetOrigins() const;
238 
239  /// Swap the ViewsDescriptor value first and second.
240  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
241 private:
242  OriginsDescriptor m_Origins;
243  uint32_t** m_ViewSizes;
244 };
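As an illustrative sketch (not part of the original header), splitting a [1, 6, 4, 4] tensor into two [1, 3, 4, 4] views along dimension 1 could be configured like this:

    armnn::ViewsDescriptor splitDesc(2, 4);  // two views over a four-dimensional input
    for (uint32_t view = 0; view < 2; ++view)
    {
        // Each view starts at row view * 3 of dimension 1 and at 0 elsewhere.
        splitDesc.SetViewOriginCoord(view, 0, 0);
        splitDesc.SetViewOriginCoord(view, 1, view * 3);
        splitDesc.SetViewOriginCoord(view, 2, 0);
        splitDesc.SetViewOriginCoord(view, 3, 0);
        // Every view has size [1, 3, 4, 4].
        splitDesc.SetViewSize(view, 0, 1);
        splitDesc.SetViewSize(view, 1, 3);
        splitDesc.SetViewSize(view, 2, 4);
        splitDesc.SetViewSize(view, 3, 4);
    }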
245 
246 
247 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
248 /// concatenation of a number of input tensors.
249 template <typename TensorShapeIt>
250 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
251  TensorShapeIt last,
252  unsigned int concatenationDimension)
253 {
254  auto numInputs = std::distance(first, last);
255 
256  if (numInputs < 2)
257  {
258  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
259  }
260 
261  const auto& firstInputShape = *first;
262 
263  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
264  for (auto it = first + 1; it != last; ++it)
265  {
266  if (it->GetNumDimensions() != numDimensions)
267  {
268  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
269  }
270  }
271 
272  if (concatenationDimension >= numDimensions)
273  {
274  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
275  }
276 
277  for (auto it = first; it != last; ++it)
278  {
279  for (unsigned int d = 0; d < numDimensions; ++d)
280  {
281  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
282  if (!dimSizeOk)
283  {
284  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
285  " except the concatenation dimension");
286  }
287  }
288  }
289 
290  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
291  viewsDescriptor.SetConcatAxis(concatenationDimension);
292 
293  uint32_t viewIndex = 0u;
294  uint32_t coordAlongConcatDim = 0u;
295  for (auto it = first; it != last; ++it)
296  {
297  const auto& inputShape = *it;
298 
299  for (unsigned int i = 0; i < concatenationDimension; ++i)
300  {
301  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
302  }
303 
304  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
305  unsigned int dimSize = inputShape[concatenationDimension];
306  coordAlongConcatDim += dimSize;
307 
308 
309  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
310  {
311  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
312  }
313 
314  ++viewIndex;
315  }
316 
317  return viewsDescriptor;
318 }
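As an illustrative sketch (not part of the original header), concatenating two NHWC tensors along the channel dimension could use the helper as follows (assuming TensorShape's initializer-list constructor):

    // Two inputs that differ only in the channel dimension (axis 3).
    std::vector<armnn::TensorShape> inputShapes = { armnn::TensorShape({ 1, 8, 8, 16 }),
                                                    armnn::TensorShape({ 1, 8, 8, 24 }) };
    armnn::OriginsDescriptor concatDesc =
        armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), 3);
    // concatDesc now holds one view per input, offset by 0 and 16 along axis 3.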
319 
320 /// A Pooling2dDescriptor for the Pooling2dLayer.
321 struct Pooling2dDescriptor : BaseDescriptor
322 {
323  Pooling2dDescriptor()
324  : m_PoolType(PoolingAlgorithm::Max)
325  , m_PadLeft(0)
326  , m_PadRight(0)
327  , m_PadTop(0)
328  , m_PadBottom(0)
329  , m_PoolWidth(0)
330  , m_PoolHeight(0)
331  , m_StrideX(0)
332  , m_StrideY(0)
333  , m_OutputShapeRounding(OutputShapeRounding::Floor)
334  , m_PaddingMethod(PaddingMethod::Exclude)
335  , m_DataLayout(DataLayout::NCHW)
336  {}
337 
338  bool operator ==(const Pooling2dDescriptor& rhs) const
339  {
340  return m_PoolType == rhs.m_PoolType &&
341  m_PadLeft == rhs.m_PadLeft &&
342  m_PadRight == rhs.m_PadRight &&
343  m_PadTop == rhs.m_PadTop &&
344  m_PadBottom == rhs.m_PadBottom &&
345  m_PoolWidth == rhs.m_PoolWidth &&
346  m_PoolHeight == rhs.m_PoolHeight &&
347  m_StrideX == rhs.m_StrideX &&
348  m_StrideY == rhs.m_StrideY &&
349  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
350  m_PaddingMethod == rhs.m_PaddingMethod &&
351  m_DataLayout == rhs.m_DataLayout;
352  }
353 
354  /// The pooling algorithm to use (Max, Average, L2).
355  PoolingAlgorithm m_PoolType;
356  /// Padding left value in the width dimension.
357  uint32_t m_PadLeft;
358  /// Padding right value in the width dimension.
359  uint32_t m_PadRight;
360  /// Padding top value in the height dimension.
361  uint32_t m_PadTop;
362  /// Padding bottom value in the height dimension.
363  uint32_t m_PadBottom;
364  /// Pooling width value.
365  uint32_t m_PoolWidth;
366  /// Pooling height value.
367  uint32_t m_PoolHeight;
368  /// Stride value when proceeding through input for the width dimension.
369  uint32_t m_StrideX;
370  /// Stride value when proceeding through input for the height dimension.
371  uint32_t m_StrideY;
372  /// The rounding method for the output shape. (Floor, Ceiling).
373  OutputShapeRounding m_OutputShapeRounding;
374  /// The padding method to be used. (Exclude, IgnoreValue).
375  PaddingMethod m_PaddingMethod;
376  /// The data layout to be used (NCHW, NHWC).
377  DataLayout m_DataLayout;
378 };
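As an illustrative sketch (not part of the original header), a 2x2 average pool with stride 2 over an NHWC tensor could be configured as:

    armnn::Pooling2dDescriptor poolDesc;
    poolDesc.m_PoolType   = armnn::PoolingAlgorithm::Average;
    poolDesc.m_PoolWidth  = 2;
    poolDesc.m_PoolHeight = 2;
    poolDesc.m_StrideX    = 2;
    poolDesc.m_StrideY    = 2;
    poolDesc.m_DataLayout = armnn::DataLayout::NHWC;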
379 
380 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
381 struct FullyConnectedDescriptor : BaseDescriptor
382 {
383  FullyConnectedDescriptor()
384  : m_BiasEnabled(false)
385  , m_TransposeWeightMatrix(false)
386  , m_ConstantWeights(true)
387  {}
388 
389  bool operator ==(const FullyConnectedDescriptor& rhs) const
390  {
391  return m_BiasEnabled == rhs.m_BiasEnabled
392  && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix
393  && m_ConstantWeights == rhs.m_ConstantWeights;
394  }
395 
396  /// Get the number of views/inputs.
397  ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use GetNumInputs instead", "22.05")
398  uint32_t GetNumViews() const;
399 
400  /// Get the number of views/inputs.
401  uint32_t GetNumInputs() const;
402 
403  /// Enable/disable bias.
404  bool m_BiasEnabled;
405  /// Enable/disable transpose weight matrix.
406  bool m_TransposeWeightMatrix;
407  /// Enable/disable constant weights and biases.
408  bool m_ConstantWeights;
409 };
410 
411 /// A Convolution2dDescriptor for the Convolution2dLayer.
412 struct Convolution2dDescriptor : BaseDescriptor
413 {
414  Convolution2dDescriptor()
415  : m_PadLeft(0)
416  , m_PadRight(0)
417  , m_PadTop(0)
418  , m_PadBottom(0)
419  , m_StrideX(1)
420  , m_StrideY(1)
421  , m_DilationX(1)
422  , m_DilationY(1)
423  , m_BiasEnabled(false)
424  , m_DataLayout(DataLayout::NCHW)
425  {}
426 
427  bool operator ==(const Convolution2dDescriptor& rhs) const
428  {
429  return m_PadLeft == rhs.m_PadLeft &&
430  m_PadRight == rhs.m_PadRight &&
431  m_PadTop == rhs.m_PadTop &&
432  m_PadBottom == rhs.m_PadBottom &&
433  m_StrideX == rhs.m_StrideX &&
434  m_StrideY == rhs.m_StrideY &&
435  m_DilationX == rhs.m_DilationX &&
436  m_DilationY == rhs.m_DilationY &&
437  m_BiasEnabled == rhs.m_BiasEnabled &&
438  m_DataLayout == rhs.m_DataLayout;
439  }
440 
441  /// Padding left value in the width dimension.
442  uint32_t m_PadLeft;
443  /// Padding right value in the width dimension.
444  uint32_t m_PadRight;
445  /// Padding top value in the height dimension.
446  uint32_t m_PadTop;
447  /// Padding bottom value in the height dimension.
448  uint32_t m_PadBottom;
449  /// Stride value when proceeding through input for the width dimension.
450  uint32_t m_StrideX;
451  /// Stride value when proceeding through input for the height dimension.
452  uint32_t m_StrideY;
453  /// Dilation along x axis
454  uint32_t m_DilationX;
455  /// Dilation along y axis
456  uint32_t m_DilationY;
457  /// Enable/disable bias.
458  bool m_BiasEnabled;
459  /// The data layout to be used (NCHW, NHWC).
460  DataLayout m_DataLayout;
461 };
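As an illustrative sketch (not part of the original header), a 3x3 convolution padded by one element on each border, with stride 2 and a bias, over an NHWC tensor could be configured as:

    armnn::Convolution2dDescriptor convDesc;
    convDesc.m_PadLeft     = 1;
    convDesc.m_PadRight    = 1;
    convDesc.m_PadTop      = 1;
    convDesc.m_PadBottom   = 1;
    convDesc.m_StrideX     = 2;
    convDesc.m_StrideY     = 2;
    convDesc.m_BiasEnabled = true;
    convDesc.m_DataLayout  = armnn::DataLayout::NHWC;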
462 
463 /// A Convolution3dDescriptor for the Convolution3dLayer.
464 struct Convolution3dDescriptor : BaseDescriptor
465 {
466  Convolution3dDescriptor()
467  : m_PadLeft(0)
468  , m_PadRight(0)
469  , m_PadTop(0)
470  , m_PadBottom(0)
471  , m_PadFront(0)
472  , m_PadBack(0)
473  , m_StrideX(1)
474  , m_StrideY(1)
475  , m_StrideZ(1)
476  , m_DilationX(1)
477  , m_DilationY(1)
478  , m_DilationZ(1)
479  , m_BiasEnabled(false)
480  , m_DataLayout(DataLayout::NDHWC)
481  {}
482 
483  bool operator ==(const Convolution3dDescriptor& rhs) const
484  {
485  return m_PadLeft == rhs.m_PadLeft &&
486  m_PadRight == rhs.m_PadRight &&
487  m_PadTop == rhs.m_PadTop &&
488  m_PadBottom == rhs.m_PadBottom &&
489  m_PadFront == rhs.m_PadFront &&
490  m_PadBack == rhs.m_PadBack &&
491  m_StrideX == rhs.m_StrideX &&
492  m_StrideY == rhs.m_StrideY &&
493  m_StrideZ == rhs.m_StrideZ &&
494  m_DilationX == rhs.m_DilationX &&
495  m_DilationY == rhs.m_DilationY &&
496  m_DilationZ == rhs.m_DilationZ &&
497  m_BiasEnabled == rhs.m_BiasEnabled &&
498  m_DataLayout == rhs.m_DataLayout;
499  }
500 
501  /// Get the number of views/inputs.
502  uint32_t GetNumInputs() const;
503 
504  /// Padding left value in the width dimension.
505  uint32_t m_PadLeft;
506  /// Padding right value in the width dimension.
507  uint32_t m_PadRight;
508  /// Padding top value in the height dimension.
509  uint32_t m_PadTop;
510  /// Padding bottom value in the height dimension.
511  uint32_t m_PadBottom;
512  /// Padding front value in the depth dimension.
513  uint32_t m_PadFront;
514  /// Padding back value in the depth dimension.
515  uint32_t m_PadBack;
516  /// Stride value when proceeding through input for the width dimension.
517  uint32_t m_StrideX;
518  /// Stride value when proceeding through input for the height dimension.
519  uint32_t m_StrideY;
520  /// Stride value when proceeding through input for the depth dimension.
521  uint32_t m_StrideZ;
522  /// Dilation along x axis
523  uint32_t m_DilationX;
524  /// Dilation along y axis
525  uint32_t m_DilationY;
526  /// Dilation along z axis
527  uint32_t m_DilationZ;
528  /// Enable/disable bias.
529  bool m_BiasEnabled;
530  /// The data layout to be used (NDHWC, NCDHW).
531  DataLayout m_DataLayout;
532 };
533 
534 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
535 struct DepthwiseConvolution2dDescriptor : BaseDescriptor
536 {
537  DepthwiseConvolution2dDescriptor()
538  : m_PadLeft(0)
539  , m_PadRight(0)
540  , m_PadTop(0)
541  , m_PadBottom(0)
542  , m_StrideX(1)
543  , m_StrideY(1)
544  , m_DilationX(1)
545  , m_DilationY(1)
546  , m_BiasEnabled(false)
547  , m_DataLayout(DataLayout::NCHW)
548  {}
549 
550  bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
551  {
552  return m_PadLeft == rhs.m_PadLeft &&
553  m_PadRight == rhs.m_PadRight &&
554  m_PadTop == rhs.m_PadTop &&
555  m_PadBottom == rhs.m_PadBottom &&
556  m_StrideX == rhs.m_StrideX &&
557  m_StrideY == rhs.m_StrideY &&
558  m_DilationX == rhs.m_DilationX &&
559  m_DilationY == rhs.m_DilationY &&
560  m_BiasEnabled == rhs.m_BiasEnabled &&
561  m_DataLayout == rhs.m_DataLayout;
562  }
563 
564  /// Padding left value in the width dimension.
565  uint32_t m_PadLeft;
566  /// Padding right value in the width dimension.
567  uint32_t m_PadRight;
568  /// Padding top value in the height dimension.
569  uint32_t m_PadTop;
570  /// Padding bottom value in the height dimension.
571  uint32_t m_PadBottom;
572  /// Stride value when proceeding through input for the width dimension.
573  uint32_t m_StrideX;
574  /// Stride value when proceeding through input for the height dimension.
575  uint32_t m_StrideY;
576  /// Dilation factor value for width dimension.
577  uint32_t m_DilationX;
578  /// Dilation factor value for height dimension.
579  uint32_t m_DilationY;
580  /// Enable/disable bias.
581  bool m_BiasEnabled;
582  /// The data layout to be used (NCHW, NHWC).
583  DataLayout m_DataLayout;
584 };
585 
586 struct DetectionPostProcessDescriptor : BaseDescriptor
587 {
588  DetectionPostProcessDescriptor()
589  : m_MaxDetections(0)
590  , m_MaxClassesPerDetection(1)
591  , m_DetectionsPerClass(1)
592  , m_NmsScoreThreshold(0)
593  , m_NmsIouThreshold(0)
594  , m_NumClasses(0)
595  , m_UseRegularNms(false)
596  , m_ScaleX(0)
597  , m_ScaleY(0)
598  , m_ScaleW(0)
599  , m_ScaleH(0)
600  {}
601 
602  bool operator ==(const DetectionPostProcessDescriptor& rhs) const
603  {
604  return m_MaxDetections == rhs.m_MaxDetections &&
605  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
606  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
607  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
608  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
609  m_NumClasses == rhs.m_NumClasses &&
610  m_UseRegularNms == rhs.m_UseRegularNms &&
611  m_ScaleX == rhs.m_ScaleX &&
612  m_ScaleY == rhs.m_ScaleY &&
613  m_ScaleW == rhs.m_ScaleW &&
614  m_ScaleH == rhs.m_ScaleH;
615  }
616 
617  /// Maximum numbers of detections.
618  uint32_t m_MaxDetections;
619  /// Maximum numbers of classes per detection, used in Fast NMS.
621  /// Detections per classes, used in Regular NMS.
623  /// NMS score threshold.
625  /// Intersection over union threshold.
627  /// Number of classes.
628  uint32_t m_NumClasses;
629  /// Use Regular NMS.
631  /// Center size encoding scale x.
632  float m_ScaleX;
633  /// Center size encoding scale y.
634  float m_ScaleY;
635  /// Center size encoding scale weight.
636  float m_ScaleW;
637  /// Center size encoding scale height.
638  float m_ScaleH;
639 };
640 
641 /// A NormalizationDescriptor for the NormalizationLayer.
642 struct NormalizationDescriptor : BaseDescriptor
643 {
644  NormalizationDescriptor()
645  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
646  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
647  , m_NormSize(0)
648  , m_Alpha(0.f)
649  , m_Beta(0.f)
650  , m_K(0.f)
651  , m_DataLayout(DataLayout::NCHW)
652  {}
653 
654  bool operator ==(const NormalizationDescriptor& rhs) const
655  {
656  return m_NormChannelType == rhs.m_NormChannelType &&
657  m_NormMethodType == rhs.m_NormMethodType &&
658  m_NormSize == rhs.m_NormSize &&
659  m_Alpha == rhs.m_Alpha &&
660  m_Beta == rhs.m_Beta &&
661  m_K == rhs.m_K &&
662  m_DataLayout == rhs.m_DataLayout;
663  }
664 
665  /// Normalization channel algorithm to use (Across, Within).
667  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
669  /// Depth radius value.
670  uint32_t m_NormSize;
671  /// Alpha value for the normalization equation.
672  float m_Alpha;
673  /// Beta value for the normalization equation.
674  float m_Beta;
675  /// Kappa value used for the across channel normalization equation.
676  float m_K;
677  /// The data layout to be used (NCHW, NHWC).
679 };
680 
681 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
683 {
685  : m_Eps(1e-12f)
686  , m_DataLayout(DataLayout::NCHW)
687  {}
688 
689  bool operator ==(const L2NormalizationDescriptor& rhs) const
690  {
691  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
692  }
693 
694  /// Used to avoid dividing by zero.
695  float m_Eps;
696  /// The data layout to be used (NCHW, NHWC).
698 };
699 
700 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
702 {
704  : m_Eps(0.0001f)
705  , m_DataLayout(DataLayout::NCHW)
706  {}
707 
709  {
710  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
711  }
712 
713  /// Value to add to the variance. Used to avoid dividing by zero.
714  float m_Eps;
715  /// The data layout to be used (NCHW, NHWC).
717 };
718 
719 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
721 {
723  : m_Gamma(1.0f)
724  , m_Beta(0.0f)
725  , m_Eps(1e-12f)
726  , m_DataLayout(DataLayout::NCHW)
727  {}
728 
730  {
731  return m_Gamma == rhs.m_Gamma &&
732  m_Beta == rhs.m_Beta &&
733  m_Eps == rhs.m_Eps &&
734  m_DataLayout == rhs.m_DataLayout;
735  }
736 
737  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
738  float m_Gamma;
739  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
740  float m_Beta;
741  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
742  float m_Eps;
743  /// The data layout to be used (NCHW, NHWC).
745 };
746 
747 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
748 struct BatchToSpaceNdDescriptor : BaseDescriptor
749 {
750  BatchToSpaceNdDescriptor()
751  : m_BlockShape({1, 1})
752  , m_Crops({{0, 0}, {0, 0}})
753  , m_DataLayout(DataLayout::NCHW)
754  {}
755 
756  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
757  std::vector<std::pair<unsigned int, unsigned int>> crops)
758  : m_BlockShape(blockShape)
759  , m_Crops(crops)
760  , m_DataLayout(DataLayout::NCHW)
761  {}
762 
763  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
764  {
765  return m_BlockShape == rhs.m_BlockShape &&
766  m_Crops == rhs.m_Crops &&
767  m_DataLayout == rhs.m_DataLayout;
768  }
769 
770  /// Block shape values.
771  std::vector<unsigned int> m_BlockShape;
772  /// The values to crop from the input dimension.
773  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
774  /// The data layout to be used (NCHW, NHWC).
775  DataLayout m_DataLayout;
776 };
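As an illustrative sketch (not part of the original header), rearranging a batch of 4 into 2x2 spatial blocks with no cropping could be configured as:

    armnn::BatchToSpaceNdDescriptor b2sDesc({ 2, 2 },              // block shape for H and W
                                            { {0, 0}, {0, 0} });   // no cropping
    b2sDesc.m_DataLayout = armnn::DataLayout::NHWC;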
777 
778 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
780 {
782  : m_Min(-6.0f)
783  , m_Max(6.0f)
784  {}
785 
787  {
788  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
789  }
790 
791  /// Minimum value.
792  float m_Min;
793  /// Maximum value.
794  float m_Max;
795 };
796 
797 /// A FillDescriptor for the FillLayer
799 {
801  : m_Value(0)
802  {}
803 
804  FillDescriptor(const float& value)
805  : m_Value(value)
806  {}
807 
808  bool operator ==(const FillDescriptor& rhs) const
809  {
810  return m_Value == rhs.m_Value;
811  }
812 
813  float m_Value;
814 };
815 
816 /// A GatherDescriptor for the GatherLayer.
818 {
820  : m_Axis(0)
821  {}
822 
823  GatherDescriptor(int32_t axis)
824  : m_Axis(axis)
825  {}
826 
827  bool operator ==(const GatherDescriptor& rhs) const
828  {
829  return m_Axis == rhs.m_Axis;
830  }
831 
832  /// The axis in params to gather indices from
833  int32_t m_Axis;
834 };
835 
836 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
838  "ResizeBilinearDescriptor is not supported anymore. Use ResizeDescriptor instead.",
839  "22.08")
840  ResizeBilinearDescriptor : BaseDescriptor
841 {
842  ResizeBilinearDescriptor()
843  : m_TargetWidth(0)
844  , m_TargetHeight(0)
845  , m_DataLayout(DataLayout::NCHW)
846  , m_AlignCorners(false)
847  , m_HalfPixelCenters(false)
848  {}
849 
851  bool operator ==(const ResizeBilinearDescriptor& rhs) const
852  {
853  return m_TargetWidth == rhs.m_TargetWidth &&
854  m_TargetHeight == rhs.m_TargetHeight &&
855  m_DataLayout == rhs.m_DataLayout &&
856  m_AlignCorners == rhs.m_AlignCorners &&
857  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
858  }
860 
861  /// Target width value.
862  uint32_t m_TargetWidth;
863  /// Target height value.
864  uint32_t m_TargetHeight;
865  /// The data layout to be used (NCHW, NHWC).
866  DataLayout m_DataLayout;
867  /// Aligned corners
868  bool m_AlignCorners;
869  /// Half Pixel Centers
870  bool m_HalfPixelCenters;
871 };
872 
873 /// A ResizeDescriptor for the ResizeLayer.
875 {
877  : m_TargetWidth(0)
878  , m_TargetHeight(0)
879  , m_Method(ResizeMethod::NearestNeighbor)
880  , m_DataLayout(DataLayout::NCHW)
881  , m_AlignCorners(false)
882  , m_HalfPixelCenters(false)
883  {}
884 
885  bool operator ==(const ResizeDescriptor& rhs) const
886  {
887  return m_TargetWidth == rhs.m_TargetWidth &&
888  m_TargetHeight == rhs.m_TargetHeight &&
889  m_Method == rhs.m_Method &&
890  m_DataLayout == rhs.m_DataLayout &&
891  m_AlignCorners == rhs.m_AlignCorners &&
892  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
893  }
894 
895  /// Target width value.
896  uint32_t m_TargetWidth;
897  /// Target height value.
898  uint32_t m_TargetHeight;
899  /// The Interpolation method to use
900  /// (Bilinear, NearestNeighbor).
902  /// The data layout to be used (NCHW, NHWC).
904  /// Aligned corners
906  /// Half Pixel Centers
908 };
909 
910 
911 /// A ReshapeDescriptor for the ReshapeLayer.
912 struct ReshapeDescriptor : BaseDescriptor
913 {
914  ReshapeDescriptor()
915  : m_TargetShape()
916  {}
917 
918  ReshapeDescriptor(const TensorShape& shape)
919  : m_TargetShape(shape)
920  {}
921 
922  bool operator ==(const ReshapeDescriptor& rhs) const
923  {
924  return m_TargetShape == rhs.m_TargetShape;
925  }
926 
927  /// Target shape value.
928  TensorShape m_TargetShape;
929 };
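As an illustrative sketch (not part of the original header), flattening a [1, 3, 224, 224] tensor into [1, 150528] only needs the target shape (assuming TensorShape's initializer-list constructor):

    armnn::ReshapeDescriptor reshapeDesc(armnn::TensorShape({ 1, 150528 }));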
930 
931 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
933 {
935  : m_BlockShape({1, 1})
936  , m_PadList({{0, 0}, {0, 0}})
937  , m_DataLayout(DataLayout::NCHW)
938  {}
939 
940  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
941  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
942  : m_BlockShape(blockShape)
943  , m_PadList(padList)
944  , m_DataLayout(DataLayout::NCHW)
945  {}
946 
947  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
948  {
949  return m_BlockShape == rhs.m_BlockShape &&
950  m_PadList == rhs.m_PadList &&
951  m_DataLayout == rhs.m_DataLayout;
952  }
953 
954  /// Block shape value.
955  std::vector<unsigned int> m_BlockShape;
956  /// @brief Specifies the padding values for the input dimension:
957  /// heightPad{top, bottom} widthPad{left, right}.
958  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
959  /// The data layout to be used (NCHW, NHWC).
961 };
962 
963 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
965 {
968  {}
969 
970  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
971  : m_BlockSize(blockSize)
972  , m_DataLayout(dataLayout)
973  {}
974 
975  bool operator ==(const SpaceToDepthDescriptor& rhs) const
976  {
977  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
978  }
979 
980  /// Scalar specifying the input block size. It must be >= 1
981  unsigned int m_BlockSize;
982 
983  /// The data layout to be used (NCHW, NHWC).
985 };
986 
987 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer
988 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
989 
990 /// An LstmDescriptor for the LstmLayer.
991 struct LstmDescriptor : BaseDescriptor
992 {
993  LstmDescriptor()
994  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
995  , m_ClippingThresCell(0.0)
996  , m_ClippingThresProj(0.0)
997  , m_CifgEnabled(true)
998  , m_PeepholeEnabled(false)
999  , m_ProjectionEnabled(false)
1000  , m_LayerNormEnabled(false)
1001  , m_TimeMajor(false)
1002  {}
1003 
1004  bool operator ==(const LstmDescriptor& rhs) const
1005  {
1006  return m_ActivationFunc == rhs.m_ActivationFunc &&
1007  m_ClippingThresCell == rhs.m_ClippingThresCell &&
1008  m_ClippingThresProj == rhs.m_ClippingThresProj &&
1009  m_CifgEnabled == rhs.m_CifgEnabled &&
1010  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1011  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1012  m_TimeMajor == rhs.m_TimeMajor;
1013  }
1014 
1015  /// @brief The activation function to use.
1016  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
1018  /// Clipping threshold value for the cell state.
1020  /// Clipping threshold value for the projection.
1022  /// Enable/disable cifg (coupled input & forget gate).
1024  /// Enable/disable peephole.
1026  /// Enable/disable the projection layer.
1028  /// Enable/disable layer normalization
1030  /// Enable/disable time major
1032 };
1033 
1034 using UnidirectionalSequenceLstmDescriptor = LstmDescriptor;
1035 
1036 /// A MeanDescriptor for the MeanLayer.
1037 struct MeanDescriptor : BaseDescriptor
1038 {
1039  MeanDescriptor()
1040  : m_Axis()
1041  , m_KeepDims(false)
1042  {}
1043 
1044  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
1045  : m_Axis(axis)
1046  , m_KeepDims(keepDims)
1047  {}
1048 
1049  bool operator ==(const MeanDescriptor& rhs) const
1050  {
1051  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
1052  }
1053 
1054  /// Values for the dimensions to reduce.
1055  std::vector<unsigned int> m_Axis;
1056  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
1057  bool m_KeepDims;
1058 };
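As an illustrative sketch (not part of the original header), averaging over the spatial dimensions of an NHWC tensor while keeping them as size-1 dimensions could be configured as:

    armnn::MeanDescriptor meanDesc({ 1, 2 },   // reduce H and W
                                   true);      // keep the reduced dimensions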
1059 
1060 /// A PadDescriptor for the PadLayer.
1061 struct PadDescriptor : BaseDescriptor
1062 {
1063  PadDescriptor() : m_PadValue(0), m_PaddingMode(PaddingMode::Constant)
1064  {}
1065 
1066  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList,
1067  const float& padValue = 0,
1068  const PaddingMode& paddingMode = PaddingMode::Constant)
1069  : m_PadList(padList)
1070  , m_PadValue(padValue)
1071  , m_PaddingMode(paddingMode)
1072  {}
1073 
1074  bool operator ==(const PadDescriptor& rhs) const
1075  {
1076  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue && m_PaddingMode == rhs.m_PaddingMode;
1077  }
1078 
1079  /// @brief Specifies the padding for input dimension.
1080  /// First is the number of values to add before the tensor in the dimension.
1081  /// Second is the number of values to add after the tensor in the dimension.
1082  /// The number of pairs should match the number of dimensions in the input tensor.
1083  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1084 
1085  /// Optional value to use for padding, defaults to 0
1086  float m_PadValue;
1087 
1088  /// Specifies the Padding mode (Constant, Reflect or Symmetric)
1089  PaddingMode m_PaddingMode;
1090 };
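As an illustrative sketch (not part of the original header), zero-padding the height and width of an NHWC tensor by one element on each side could be configured as:

    std::vector<std::pair<unsigned int, unsigned int>> padList = { {0, 0}, {1, 1}, {1, 1}, {0, 0} };
    armnn::PadDescriptor padDesc(padList, 0.0f, armnn::PaddingMode::Constant);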
1091 
1092 /// A SliceDescriptor for the SliceLayer.
1094 {
1095  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1096  : m_Begin(begin)
1097  , m_Size(size)
1098  {}
1099 
1101  {}
1102 
1103  bool operator ==(const SliceDescriptor& rhs) const
1104  {
1105  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1106  }
1107 
1108  /// Beginning indices of the slice in each dimension.
1109  std::vector<unsigned int> m_Begin;
1110 
1111  /// Size of the slice in each dimension.
1112  std::vector<unsigned int> m_Size;
1113 };
1114 
1115 /// A StackDescriptor for the StackLayer.
1117 {
1119  : m_Axis(0)
1120  , m_NumInputs(0)
1121  , m_InputShape()
1122  {}
1123 
1124  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1125  : m_Axis(axis)
1126  , m_NumInputs(numInputs)
1127  , m_InputShape(inputShape)
1128  {}
1129 
1130  bool operator ==(const StackDescriptor& rhs) const
1131  {
1132  return m_Axis == rhs.m_Axis &&
1133  m_NumInputs == rhs.m_NumInputs &&
1134  m_InputShape == rhs.m_InputShape;
1135  }
1136 
1137  /// 0-based axis along which to stack the input tensors.
1138  uint32_t m_Axis;
1139  /// Number of input tensors.
1140  uint32_t m_NumInputs;
1141  /// Required shape of all input tensors.
1143 };
1144 
1145 /// A StandInDescriptor for the StandIn layer
1147 {
1149 
1150  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1151  : m_NumInputs(numInputs)
1152  , m_NumOutputs(numOutputs)
1153  {}
1154 
1155  bool operator ==(const StandInDescriptor& rhs) const
1156  {
1157  return m_NumInputs == rhs.m_NumInputs &&
1158  m_NumOutputs == rhs.m_NumOutputs;
1159  }
1160 
1161  /// Number of input tensors
1162  uint32_t m_NumInputs = 0;
1163  /// Number of output tensors
1164  uint32_t m_NumOutputs = 0;
1165 };
1166 
1167 /// A StridedSliceDescriptor for the StridedSliceLayer.
1168 struct StridedSliceDescriptor : BaseDescriptor
1169 {
1170  StridedSliceDescriptor(const std::vector<int>& begin,
1171  const std::vector<int>& end,
1172  const std::vector<int>& stride)
1173  : m_Begin(begin)
1174  , m_End(end)
1175  , m_Stride(stride)
1176  , m_BeginMask(0)
1177  , m_EndMask(0)
1178  , m_ShrinkAxisMask(0)
1179  , m_EllipsisMask(0)
1180  , m_NewAxisMask(0)
1181  , m_DataLayout(DataLayout::NCHW)
1182  {}
1183 
1184  StridedSliceDescriptor()
1185  : StridedSliceDescriptor({}, {}, {})
1186  {}
1187 
1188  bool operator ==(const StridedSliceDescriptor& rhs) const
1189  {
1190  return m_Begin == rhs.m_Begin &&
1191  m_End == rhs.m_End &&
1192  m_Stride == rhs.m_Stride &&
1193  m_BeginMask == rhs.m_BeginMask &&
1194  m_EndMask == rhs.m_EndMask &&
1195  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1196  m_EllipsisMask == rhs.m_EllipsisMask &&
1197  m_NewAxisMask == rhs.m_NewAxisMask &&
1198  m_DataLayout == rhs.m_DataLayout;
1199  }
1200 
1201  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1202  int GetStopForAxis(const TensorShape& inputShape,
1203  unsigned int axis,
1204  int startForAxis) const;
1205 
1206  /// Begin values for the input that will be sliced.
1207  std::vector<int> m_Begin;
1208  /// End values for the input that will be sliced.
1209  std::vector<int> m_End;
1210  /// Stride values for the input that will be sliced.
1211  std::vector<int> m_Stride;
1212 
1213  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1214  /// range is used for the dimension.
1215  int32_t m_BeginMask;
1216  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1217  /// is used for the dimension.
1218  int32_t m_EndMask;
1219  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1220  int32_t m_ShrinkAxisMask;
1221  /// Ellipsis mask value.
1222  int32_t m_EllipsisMask;
1223  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1224  /// a new 1 dimension is inserted to this location of the output tensor.
1225  int32_t m_NewAxisMask;
1226 
1227  /// The data layout to be used (NCHW, NHWC).
1228  DataLayout m_DataLayout;
1229 };
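As an illustrative sketch (not part of the original header), taking every second element along dimension 2 of a four-dimensional tensor could be expressed with the masks treated as per-dimension bit flags:

    armnn::StridedSliceDescriptor sliceDesc({ 0, 0, 0, 0 },   // begin (ignored via m_BeginMask)
                                            { 0, 0, 0, 0 },   // end (ignored via m_EndMask)
                                            { 1, 1, 2, 1 });  // stride: step 2 on dimension 2
    sliceDesc.m_BeginMask = 0xF;  // use the full range start for all four dimensions
    sliceDesc.m_EndMask   = 0xF;  // use the full range end for all four dimensions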
1230 
1231 /// A PreCompiledDescriptor for the PreCompiledLayer.
1232 struct PreCompiledDescriptor : BaseDescriptor
1233 {
1234  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1235  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1236  {}
1237 
1238  ~PreCompiledDescriptor() = default;
1239 
1240  unsigned int m_NumInputSlots;
1241  unsigned int m_NumOutputSlots;
1242 };
1243 
1244 /// A QLstmDescriptor for the QLstmLayer.
1245 struct QLstmDescriptor : BaseDescriptor
1246 {
1247  QLstmDescriptor()
1248  : m_CellClip(0.0)
1249  , m_ProjectionClip(0.0)
1250  , m_CifgEnabled(true)
1251  , m_PeepholeEnabled(false)
1252  , m_ProjectionEnabled(false)
1253  , m_LayerNormEnabled(false)
1254  , m_InputIntermediateScale(0.0)
1255  , m_ForgetIntermediateScale(0.0)
1256  , m_CellIntermediateScale(0.0)
1257  , m_OutputIntermediateScale(0.0)
1258  , m_HiddenStateZeroPoint(0)
1259  , m_HiddenStateScale(0.0)
1260  {}
1261 
1262  bool operator ==(const QLstmDescriptor& rhs) const
1263  {
1264  return m_CellClip == rhs.m_CellClip &&
1265  m_ProjectionClip == rhs.m_ProjectionClip &&
1266  m_CifgEnabled == rhs.m_CifgEnabled &&
1267  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1268  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1269  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1270  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1271  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1272  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1273  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1274  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1275  m_HiddenStateScale == rhs.m_HiddenStateScale;
1276  }
1277 
1278  /// Clipping threshold value for the cell state
1279  float m_CellClip;
1280  /// Clipping threshold value for the projection
1282  /// Enable/disable CIFG (coupled input & forget gate).
1284  /// Enable/disable peephole
1286  /// Enable/disable the projection layer
1288  /// Enable/disable layer normalization
1290  /// Input intermediate quantization scale
1292  /// Forget intermediate quantization scale
1294  /// Cell intermediate quantization scale
1296  /// Output intermediate quantization scale
1298  /// Hidden State zero point
1300  /// Hidden State quantization scale
1302 };
1303 
1304 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1306 {
1308  m_PadLeft(0),
1309  m_PadRight(0),
1310  m_PadTop(0),
1311  m_PadBottom(0),
1312  m_StrideX(0),
1313  m_StrideY(0),
1314  m_BiasEnabled(false),
1315  m_DataLayout(DataLayout::NCHW),
1316  m_OutputShapeEnabled(false)
1317  {}
1318 
1320  {
1321  return m_PadLeft == rhs.m_PadLeft &&
1322  m_PadRight == rhs.m_PadRight &&
1323  m_PadTop == rhs.m_PadTop &&
1324  m_PadBottom == rhs.m_PadBottom &&
1325  m_StrideX == rhs.m_StrideX &&
1326  m_StrideY == rhs.m_StrideY &&
1327  m_BiasEnabled == rhs.m_BiasEnabled &&
1328  m_DataLayout == rhs.m_DataLayout &&
1329  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1330  m_OutputShape == rhs.m_OutputShape;
1331  }
1332 
1333  /// Padding left value in the width dimension.
1334  uint32_t m_PadLeft;
1335  /// Padding right value in the width dimension.
1336  uint32_t m_PadRight;
1337  /// Padding top value in the height dimension.
1338  uint32_t m_PadTop;
1339  /// Padding bottom value in the height dimension.
1340  uint32_t m_PadBottom;
1341  /// Stride value when proceeding through input for the width dimension.
1342  uint32_t m_StrideX;
1343  /// Stride value when proceeding through input for the height dimension.
1344  uint32_t m_StrideY;
1345  /// Enable/disable bias.
1347  /// The data layout to be used (NCHW, NHWC).
1349  /// Output shape if it has been specified.
1351  std::vector<unsigned int> m_OutputShape;
1352 };
1353 
1354 /// A TransposeDescriptor for the TransposeLayer.
1355 struct TransposeDescriptor : BaseDescriptor
1356 {
1357  TransposeDescriptor()
1358  : m_DimMappings{}
1359  {}
1360 
1361  TransposeDescriptor(const PermutationVector& dimMappings)
1362  : m_DimMappings(dimMappings)
1363  {}
1364 
1365  bool operator ==(const TransposeDescriptor &rhs) const
1366  {
1367  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1368  }
1369 
1370  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1371  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1372  PermutationVector m_DimMappings;
1373 };
1374 
1375 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1376 struct LogicalBinaryDescriptor : BaseDescriptor
1377 {
1378  LogicalBinaryDescriptor()
1379  : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
1380  {}
1381 
1382  LogicalBinaryDescriptor(LogicalBinaryOperation operation)
1383  : m_Operation(operation)
1384  {}
1385 
1386  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1387  {
1388  return m_Operation == rhs.m_Operation;
1389  }
1390 
1391  /// Specifies the logical operation to execute
1392  LogicalBinaryOperation m_Operation;
1393 };
1394 
1395 /// A ReduceDescriptor for the REDUCE operators.
1396 struct ReduceDescriptor : BaseDescriptor
1397 {
1398  ReduceDescriptor()
1399  : m_KeepDims(false)
1400  , m_vAxis()
1401  , m_ReduceOperation(ReduceOperation::Sum)
1402  {}
1403 
1404  bool operator ==(const ReduceDescriptor& rhs) const
1405  {
1406  return m_KeepDims == rhs.m_KeepDims &&
1407  m_vAxis == rhs.m_vAxis &&
1408  m_ReduceOperation == rhs.m_ReduceOperation;
1409  }
1410 
1411  /// if true then output shape has no change.
1412  bool m_KeepDims;
1413  /// The indices of the dimensions to reduce.
1414  std::vector<uint32_t> m_vAxis;
1415  /// Specifies the reduction operation to execute
1416  ReduceOperation m_ReduceOperation;
1417 };
1418 
1419 /// A ChannelShuffleDescriptor for the ChannelShuffle operator
1420 struct ChannelShuffleDescriptor : BaseDescriptor
1421 {
1422  ChannelShuffleDescriptor()
1423  : m_NumGroups(0), m_Axis(0)
1424  {}
1425 
1426  ChannelShuffleDescriptor(const uint32_t& numGroups, const uint32_t& axis)
1427  : m_NumGroups(numGroups), m_Axis(axis)
1428  {}
1429 
1430  bool operator ==(const ChannelShuffleDescriptor& rhs) const
1431  {
1432  return m_NumGroups == rhs.m_NumGroups;
1433  }
1434 
1435  /// Number of groups for the channel shuffle operation
1436  uint32_t m_NumGroups;
1437  /// Axis to apply channel shuffle operation on
1438  uint32_t m_Axis;
1439 };
1440 
1441 } // namespace armnn