ArmNN 21.08: Descriptors.hpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 #include <iostream>
13 #include <sstream>
14 
15 #include "Tensor.hpp"
16 #include "Types.hpp"
17 
18 namespace armnn
19 {
20 
21 /// Base class for all descriptors.
22 struct BaseDescriptor {};
23 
24 /// An ActivationDescriptor for the ActivationLayer.
25 struct ActivationDescriptor : BaseDescriptor
26 {
27  ActivationDescriptor()
28  : m_Function(ActivationFunction::Sigmoid)
29  , m_A(0)
30  , m_B(0)
31  {}
32 
33  ActivationDescriptor(armnn::ActivationFunction activation,
34  float a = 0,
35  float b = 0)
36  : m_Function(activation)
37  , m_A(a)
38  , m_B(b)
39  {}
40 
41  bool operator ==(const ActivationDescriptor &rhs) const
42  {
43  return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
44  }
45 
46  /// @brief The activation function to use
47  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
48  ActivationFunction m_Function;
49  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
50  float m_A;
51  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
52  float m_B;
53 };
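// Usage sketch (editorial addition, not part of the original header): constructing an
// ActivationDescriptor for a bounded ReLU using the constructor shown above; the 6.0f/0.0f
// values are illustrative only.
//
//     armnn::ActivationDescriptor boundedRelu(armnn::ActivationFunction::BoundedReLu,
//                                             6.0f,  // m_A: upper bound
//                                             0.0f); // m_B: lower bound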
54 
55 /// An ArgMinMaxDescriptor for the ArgMinMaxLayer.
56 struct ArgMinMaxDescriptor : BaseDescriptor
57 {
58  ArgMinMaxDescriptor()
59  : m_Function(ArgMinMaxFunction::Min)
60  , m_Axis(-1)
61  , m_Output_Type(armnn::DataType::Signed32)
62  {}
63 
64  bool operator ==(const ArgMinMaxDescriptor &rhs) const
65  {
66  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
67  }
68 
69  /// Specify if the function is to find Min or Max.
70  ArgMinMaxFunction m_Function;
71  /// Axis to reduce across the input tensor.
72  int m_Axis;
73  /// Deprecated and will be removed in a future release.
74  armnn::DataType m_Output_Type;
75 };
76 
77 /// A ComparisonDescriptor for the ComparisonLayer.
78 struct ComparisonDescriptor : BaseDescriptor
79 {
80  ComparisonDescriptor()
81  : ComparisonDescriptor(ComparisonOperation::Equal)
82  {}
83 
84  ComparisonDescriptor(ComparisonOperation operation)
85  : m_Operation(operation)
86  {}
87 
88  bool operator ==(const ComparisonDescriptor &rhs) const
89  {
90  return m_Operation == rhs.m_Operation;
91  }
92 
93  /// Specifies the comparison operation to execute.
94  ComparisonOperation m_Operation;
95 };
96 
97 /// An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
98 struct ElementwiseUnaryDescriptor : BaseDescriptor
99 {
100  ElementwiseUnaryDescriptor()
101  : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
102  {}
103 
104  ElementwiseUnaryDescriptor(UnaryOperation operation)
105  : m_Operation(operation)
106  {}
107 
108  bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
109  {
110  return m_Operation == rhs.m_Operation;
111  }
112 
113  /// Specifies the elementwise unary operation to execute.
114  UnaryOperation m_Operation;
115 };
116 
117 /// A PermuteDescriptor for the PermuteLayer.
118 struct PermuteDescriptor : BaseDescriptor
119 {
120  PermuteDescriptor()
121  : m_DimMappings{}
122  {}
123 
124  PermuteDescriptor(const PermutationVector& dimMappings)
125  : m_DimMappings(dimMappings)
126  {}
127 
128  bool operator ==(const PermuteDescriptor &rhs) const
129  {
130  return m_DimMappings.IsEqual(rhs.m_DimMappings);
131  }
132 
133  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
134  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
135  PermutationVector m_DimMappings;
136 };
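// Usage sketch (editorial addition): a PermuteDescriptor built from the example mapping in the
// comment above, assuming PermutationVector (Types.hpp) can be constructed from an initializer
// list. Reading m_DimMappings[i] as the destination index of source dimension i, {0, 3, 1, 2}
// sends source dimension 1 to destination dimension 3, 2 to 1, and 3 to 2.
//
//     armnn::PermuteDescriptor permute(armnn::PermutationVector({0U, 3U, 1U, 2U}));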
137 
138 /// A SoftmaxDescriptor for the SoftmaxLayer.
139 struct SoftmaxDescriptor : BaseDescriptor
140 {
141  SoftmaxDescriptor()
142  : m_Beta(1.0f)
143  , m_Axis(-1)
144  {}
145 
146  bool operator ==(const SoftmaxDescriptor& rhs) const
147  {
148  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
149  }
150 
151  /// Exponentiation value.
152  float m_Beta;
153  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
154  int m_Axis;
155 };
156 
157 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer.
158 using LogSoftmaxDescriptor = SoftmaxDescriptor;
159 
160 /// @brief An OriginsDescriptor for the ConcatLayer.
161 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
162 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
163 struct OriginsDescriptor : BaseDescriptor
164 {
165  OriginsDescriptor();
166  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
167  OriginsDescriptor(const OriginsDescriptor& other);
168  OriginsDescriptor(OriginsDescriptor&& other);
169 
170  ~OriginsDescriptor();
171 
172  OriginsDescriptor& operator=(OriginsDescriptor rhs);
173 
174  bool operator ==(const OriginsDescriptor& rhs) const;
175 
176  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
177  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
178  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
179  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
180  /// Get the number of views.
181  uint32_t GetNumViews() const;
182  /// Get the number of dimensions.
183  uint32_t GetNumDimensions() const;
184  /// Return the view origin at index idx.
185  const uint32_t* GetViewOrigin(uint32_t idx) const;
186  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
187  /// The number of views must match number of elements in the new ordering array.
188  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
189  /// Swap the OriginsDescriptor values first and second.
190  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
191  /// Set the concatenation axis value.
192  void SetConcatAxis(unsigned int concatAxis);
193  /// Get the concatenation axis value.
194  unsigned int GetConcatAxis() const;
195 
196 private:
197  unsigned int m_ConcatAxis;
198  uint32_t m_NumViews;
199  uint32_t m_NumDimensions;
200  uint32_t** m_ViewOrigins;
201 };
202 
203 /// @brief A ViewsDescriptor for the SplitterLayer.
204 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
205 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
206 struct ViewsDescriptor : BaseDescriptor
207 {
208  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
209  ViewsDescriptor(const ViewsDescriptor& other);
210  ViewsDescriptor();
211  ViewsDescriptor(ViewsDescriptor&& other);
212 
213  ~ViewsDescriptor();
214 
215  ViewsDescriptor& operator=(ViewsDescriptor rhs);
216 
217  bool operator ==(const ViewsDescriptor& rhs) const;
218 
219  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
220  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
221  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
222  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
223  /// @brief Set the size of the views. The arguments are: view, dimension, value.
224  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
225  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
226  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
227 
228  /// Get the number of views.
229  uint32_t GetNumViews() const;
230  /// Get the number of dimensions.
231  uint32_t GetNumDimensions() const;
232  /// Get the view origin at index idx.
233  const uint32_t* GetViewOrigin(uint32_t idx) const;
234  /// Get the view sizes at index idx.
235  const uint32_t* GetViewSizes(uint32_t idx) const;
236  /// Get the view origins.
237  const OriginsDescriptor& GetOrigins() const;
238 
239  /// Swap the ViewsDescriptor values first and second.
240  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
241 private:
242  OriginsDescriptor m_Origins;
243  uint32_t** m_ViewSizes;
244 };
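// Usage sketch (editorial addition): a ViewsDescriptor that splits a 2D tensor of shape {4, 2}
// into two equal views along dimension 0; the coordinate values are illustrative.
//
//     armnn::ViewsDescriptor splitter(2, 2);                  // 2 views, 2 dimensions
//     for (uint32_t view = 0; view < 2; ++view)
//     {
//         splitter.SetViewOriginCoord(view, 0, view * 2);     // origins {0, 0} and {2, 0}
//         splitter.SetViewOriginCoord(view, 1, 0);
//         splitter.SetViewSize(view, 0, 2);                   // each view is 2 x 2
//         splitter.SetViewSize(view, 1, 2);
//     }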
245 
246 template <typename TensorShapeIt>
247 ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
248 OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
249  TensorShapeIt last,
250  unsigned int concatenationDimension)
251 {
252  return CreateDescriptorForConcatenation(first, last, concatenationDimension);
253 }
254 
255 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
256 /// concatenation of a number of input tensors.
257 template <typename TensorShapeIt>
258 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
259  TensorShapeIt last,
260  unsigned int concatenationDimension)
261 {
262  auto numInputs = std::distance(first, last);
263 
264  if (numInputs < 2)
265  {
266  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
267  }
268 
269  const auto& firstInputShape = *first;
270 
271  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
272  for (auto it = first + 1; it != last; ++it)
273  {
274  if (it->GetNumDimensions() != numDimensions)
275  {
276  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
277  }
278  }
279 
280  if (concatenationDimension >= numDimensions)
281  {
282  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
283  }
284 
285  for (auto it = first; it != last; ++it)
286  {
287  for (unsigned int d = 0; d < numDimensions; ++d)
288  {
289  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
290  if (!dimSizeOk)
291  {
292  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
293  "except the concatenation dimension");
294  }
295  }
296  }
297 
298  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
299  viewsDescriptor.SetConcatAxis(concatenationDimension);
300 
301  uint32_t viewIndex = 0u;
302  uint32_t coordAlongConcatDim = 0u;
303  for (auto it = first; it != last; ++it)
304  {
305  const auto& inputShape = *it;
306 
307  for (unsigned int i = 0; i < concatenationDimension; ++i)
308  {
309  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
310  }
311 
312  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
313  unsigned int dimSize = inputShape[concatenationDimension];
314  coordAlongConcatDim += dimSize;
315 
316 
317  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
318  {
319  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
320  }
321 
322  ++viewIndex;
323  }
324 
325  return viewsDescriptor;
326 }
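// Usage sketch (editorial addition): building an OriginsDescriptor for concatenating two 4D
// tensors along the channel dimension, assuming TensorShape (Tensor.hpp) accepts an
// initializer list of dimension sizes.
//
//     std::vector<armnn::TensorShape> shapes = { armnn::TensorShape({1, 8, 16, 16}),
//                                                armnn::TensorShape({1, 4, 16, 16}) };
//     armnn::OriginsDescriptor concat =
//         armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);
//     // concat.GetNumViews() == 2 and concat.GetConcatAxis() == 1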
327 
328 /// A Pooling2dDescriptor for the Pooling2dLayer.
329 struct Pooling2dDescriptor : BaseDescriptor
330 {
331  Pooling2dDescriptor()
332  : m_PoolType(PoolingAlgorithm::Max)
333  , m_PadLeft(0)
334  , m_PadRight(0)
335  , m_PadTop(0)
336  , m_PadBottom(0)
337  , m_PoolWidth(0)
338  , m_PoolHeight(0)
339  , m_StrideX(0)
340  , m_StrideY(0)
341  , m_OutputShapeRounding(OutputShapeRounding::Floor)
342  , m_PaddingMethod(PaddingMethod::Exclude)
343  , m_DataLayout(DataLayout::NCHW)
344  {}
345 
346  bool operator ==(const Pooling2dDescriptor& rhs) const
347  {
348  return m_PoolType == rhs.m_PoolType &&
349  m_PadLeft == rhs.m_PadLeft &&
350  m_PadRight == rhs.m_PadRight &&
351  m_PadTop == rhs.m_PadTop &&
352  m_PadBottom == rhs.m_PadBottom &&
353  m_PoolWidth == rhs.m_PoolWidth &&
354  m_PoolHeight == rhs.m_PoolHeight &&
355  m_StrideX == rhs.m_StrideX &&
356  m_StrideY == rhs.m_StrideY &&
357  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
358  m_PaddingMethod == rhs.m_PaddingMethod &&
359  m_DataLayout == rhs.m_DataLayout;
360  }
361 
362  /// The pooling algorithm to use (Max, Average, L2).
363  PoolingAlgorithm m_PoolType;
364  /// Padding left value in the width dimension.
365  uint32_t m_PadLeft;
366  /// Padding right value in the width dimension.
367  uint32_t m_PadRight;
368  /// Padding top value in the height dimension.
369  uint32_t m_PadTop;
370  /// Padding bottom value in the height dimension.
371  uint32_t m_PadBottom;
372  /// Pooling width value.
373  uint32_t m_PoolWidth;
374  /// Pooling height value.
375  uint32_t m_PoolHeight;
376  /// Stride value when proceeding through input for the width dimension.
377  uint32_t m_StrideX;
378  /// Stride value when proceeding through input for the height dimension.
379  uint32_t m_StrideY;
380  /// The rounding method for the output shape. (Floor, Ceiling).
381  OutputShapeRounding m_OutputShapeRounding;
382  /// The padding method to be used. (Exclude, IgnoreValue).
383  PaddingMethod m_PaddingMethod;
384  /// The data layout to be used (NCHW, NHWC).
385  DataLayout m_DataLayout;
386 };
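// Usage sketch (editorial addition): a 2x2 max pool with stride 2 and no padding over an NHWC
// input; the field values are illustrative.
//
//     armnn::Pooling2dDescriptor pool;
//     pool.m_PoolType   = armnn::PoolingAlgorithm::Max;
//     pool.m_PoolWidth  = 2;
//     pool.m_PoolHeight = 2;
//     pool.m_StrideX    = 2;
//     pool.m_StrideY    = 2;
//     pool.m_DataLayout = armnn::DataLayout::NHWC;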
387 
388 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
389 struct FullyConnectedDescriptor : BaseDescriptor
390 {
391  FullyConnectedDescriptor()
392  : m_BiasEnabled(false)
393  , m_TransposeWeightMatrix(false)
394  , m_ConstantWeights(true)
395  {}
396 
397  bool operator ==(const FullyConnectedDescriptor& rhs) const
398  {
399  return m_BiasEnabled == rhs.m_BiasEnabled
400  && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix
401  && m_ConstantWeights == rhs.m_ConstantWeights;
402  }
403 
404  /// Get the number of views/inputs.
405  ARMNN_DEPRECATED_MSG("Use GetNumInputs instead")
406  uint32_t GetNumViews() const;
407 
408  /// Get the number of views/inputs.
409  uint32_t GetNumInputs() const;
410 
411  /// Enable/disable bias.
412  bool m_BiasEnabled;
413  /// Enable/disable transpose weight matrix.
414  bool m_TransposeWeightMatrix;
415  /// Enable/disable constant weights and biases.
416  bool m_ConstantWeights;
417 };
418 
419 /// A Convolution2dDescriptor for the Convolution2dLayer.
420 struct Convolution2dDescriptor : BaseDescriptor
421 {
422  Convolution2dDescriptor()
423  : m_PadLeft(0)
424  , m_PadRight(0)
425  , m_PadTop(0)
426  , m_PadBottom(0)
427  , m_StrideX(1)
428  , m_StrideY(1)
429  , m_DilationX(1)
430  , m_DilationY(1)
431  , m_BiasEnabled(false)
432  , m_DataLayout(DataLayout::NCHW)
433  {}
434 
435  bool operator ==(const Convolution2dDescriptor& rhs) const
436  {
437  return m_PadLeft == rhs.m_PadLeft &&
438  m_PadRight == rhs.m_PadRight &&
439  m_PadTop == rhs.m_PadTop &&
440  m_PadBottom == rhs.m_PadBottom &&
441  m_StrideX == rhs.m_StrideX &&
442  m_StrideY == rhs.m_StrideY &&
443  m_DilationX == rhs.m_DilationX &&
444  m_DilationY == rhs.m_DilationY &&
445  m_BiasEnabled == rhs.m_BiasEnabled &&
446  m_DataLayout == rhs.m_DataLayout;
447  }
448 
449  /// Padding left value in the width dimension.
450  uint32_t m_PadLeft;
451  /// Padding right value in the width dimension.
452  uint32_t m_PadRight;
453  /// Padding top value in the height dimension.
454  uint32_t m_PadTop;
455  /// Padding bottom value in the height dimension.
456  uint32_t m_PadBottom;
457  /// Stride value when proceeding through input for the width dimension.
458  uint32_t m_StrideX;
459  /// Stride value when proceeding through input for the height dimension.
460  uint32_t m_StrideY;
461  /// Dilation along x axis
462  uint32_t m_DilationX;
463  /// Dilation along y axis
464  uint32_t m_DilationY;
465  /// Enable/disable bias.
466  bool m_BiasEnabled;
467  /// The data layout to be used (NCHW, NHWC).
468  DataLayout m_DataLayout;
469 };
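// Usage sketch (editorial addition): a 3x3 convolution with stride 1 and one element of
// padding on every edge ("same" output size for a 3x3 kernel); values are illustrative.
//
//     armnn::Convolution2dDescriptor conv;
//     conv.m_PadLeft = conv.m_PadRight = conv.m_PadTop = conv.m_PadBottom = 1;
//     conv.m_StrideX = conv.m_StrideY = 1;
//     conv.m_BiasEnabled = true;
//     conv.m_DataLayout  = armnn::DataLayout::NHWC;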
470 
471 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
472 struct DepthwiseConvolution2dDescriptor : BaseDescriptor
473 {
474  DepthwiseConvolution2dDescriptor()
475  : m_PadLeft(0)
476  , m_PadRight(0)
477  , m_PadTop(0)
478  , m_PadBottom(0)
479  , m_StrideX(1)
480  , m_StrideY(1)
481  , m_DilationX(1)
482  , m_DilationY(1)
483  , m_BiasEnabled(false)
484  , m_DataLayout(DataLayout::NCHW)
485  {}
486 
487  bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
488  {
489  return m_PadLeft == rhs.m_PadLeft &&
490  m_PadRight == rhs.m_PadRight &&
491  m_PadTop == rhs.m_PadTop &&
492  m_PadBottom == rhs.m_PadBottom &&
493  m_StrideX == rhs.m_StrideX &&
494  m_StrideY == rhs.m_StrideY &&
495  m_DilationX == rhs.m_DilationX &&
496  m_DilationY == rhs.m_DilationY &&
497  m_BiasEnabled == rhs.m_BiasEnabled &&
498  m_DataLayout == rhs.m_DataLayout;
499  }
500 
501  /// Padding left value in the width dimension.
502  uint32_t m_PadLeft;
503  /// Padding right value in the width dimension.
504  uint32_t m_PadRight;
505  /// Padding top value in the height dimension.
506  uint32_t m_PadTop;
507  /// Padding bottom value in the height dimension.
508  uint32_t m_PadBottom;
509  /// Stride value when proceeding through input for the width dimension.
510  uint32_t m_StrideX;
511  /// Stride value when proceeding through input for the height dimension.
512  uint32_t m_StrideY;
513  /// Dilation factor value for width dimension.
514  uint32_t m_DilationX;
515  /// Dilation factor value for height dimension.
516  uint32_t m_DilationY;
517  /// Enable/disable bias.
518  bool m_BiasEnabled;
519  /// The data layout to be used (NCHW, NHWC).
520  DataLayout m_DataLayout;
521 };
522 
523 struct DetectionPostProcessDescriptor : BaseDescriptor
524 {
525  DetectionPostProcessDescriptor()
526  : m_MaxDetections(0)
527  , m_MaxClassesPerDetection(1)
528  , m_DetectionsPerClass(1)
529  , m_NmsScoreThreshold(0)
530  , m_NmsIouThreshold(0)
531  , m_NumClasses(0)
532  , m_UseRegularNms(false)
533  , m_ScaleX(0)
534  , m_ScaleY(0)
535  , m_ScaleW(0)
536  , m_ScaleH(0)
537  {}
538 
539  bool operator ==(const DetectionPostProcessDescriptor& rhs) const
540  {
541  return m_MaxDetections == rhs.m_MaxDetections &&
542  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
543  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
544  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
545  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
546  m_NumClasses == rhs.m_NumClasses &&
547  m_UseRegularNms == rhs.m_UseRegularNms &&
548  m_ScaleX == rhs.m_ScaleX &&
549  m_ScaleY == rhs.m_ScaleY &&
550  m_ScaleW == rhs.m_ScaleW &&
551  m_ScaleH == rhs.m_ScaleH;
552  }
553 
554  /// Maximum numbers of detections.
555  uint32_t m_MaxDetections;
556  /// Maximum numbers of classes per detection, used in Fast NMS.
557  uint32_t m_MaxClassesPerDetection;
558  /// Detections per classes, used in Regular NMS.
559  uint32_t m_DetectionsPerClass;
560  /// NMS score threshold.
561  float m_NmsScoreThreshold;
562  /// Intersection over union threshold.
563  float m_NmsIouThreshold;
564  /// Number of classes.
565  uint32_t m_NumClasses;
566  /// Use Regular NMS.
567  bool m_UseRegularNms;
568  /// Center size encoding scale x.
569  float m_ScaleX;
570  /// Center size encoding scale y.
571  float m_ScaleY;
572  /// Center size encoding scale weight.
573  float m_ScaleW;
574  /// Center size encoding scale height.
575  float m_ScaleH;
576 };
577 
578 /// A NormalizationDescriptor for the NormalizationLayer.
579 struct NormalizationDescriptor : BaseDescriptor
580 {
581  NormalizationDescriptor()
582  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
583  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
584  , m_NormSize(0)
585  , m_Alpha(0.f)
586  , m_Beta(0.f)
587  , m_K(0.f)
588  , m_DataLayout(DataLayout::NCHW)
589  {}
590 
591  bool operator ==(const NormalizationDescriptor& rhs) const
592  {
593  return m_NormChannelType == rhs.m_NormChannelType &&
594  m_NormMethodType == rhs.m_NormMethodType &&
595  m_NormSize == rhs.m_NormSize &&
596  m_Alpha == rhs.m_Alpha &&
597  m_Beta == rhs.m_Beta &&
598  m_K == rhs.m_K &&
599  m_DataLayout == rhs.m_DataLayout;
600  }
601 
602  /// Normalization channel algorithm to use (Across, Within).
603  NormalizationAlgorithmChannel m_NormChannelType;
604  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
605  NormalizationAlgorithmMethod m_NormMethodType;
606  /// Depth radius value.
607  uint32_t m_NormSize;
608  /// Alpha value for the normalization equation.
609  float m_Alpha;
610  /// Beta value for the normalization equation.
611  float m_Beta;
612  /// Kappa value used for the across channel normalization equation.
613  float m_K;
614  /// The data layout to be used (NCHW, NHWC).
615  DataLayout m_DataLayout;
616 };
617 
618 /// An L2NormalizationDescriptor for the L2NormalizationLayer.
619 struct L2NormalizationDescriptor : BaseDescriptor
620 {
621  L2NormalizationDescriptor()
622  : m_Eps(1e-12f)
623  , m_DataLayout(DataLayout::NCHW)
624  {}
625 
626  bool operator ==(const L2NormalizationDescriptor& rhs) const
627  {
628  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
629  }
630 
631  /// Used to avoid dividing by zero.
632  float m_Eps;
633  /// The data layout to be used (NCHW, NHWC).
634  DataLayout m_DataLayout;
635 };
636 
637 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
638 struct BatchNormalizationDescriptor : BaseDescriptor
639 {
640  BatchNormalizationDescriptor()
641  : m_Eps(0.0001f)
642  , m_DataLayout(DataLayout::NCHW)
643  {}
644 
645  bool operator ==(const BatchNormalizationDescriptor& rhs) const
646  {
647  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
648  }
649 
650  /// Value to add to the variance. Used to avoid dividing by zero.
651  float m_Eps;
652  /// The data layout to be used (NCHW, NHWC).
653  DataLayout m_DataLayout;
654 };
655 
656 /// An InstanceNormalizationDescriptor for the InstanceNormalizationLayer.
657 struct InstanceNormalizationDescriptor : BaseDescriptor
658 {
659  InstanceNormalizationDescriptor()
660  : m_Gamma(1.0f)
661  , m_Beta(0.0f)
662  , m_Eps(1e-12f)
663  , m_DataLayout(DataLayout::NCHW)
664  {}
665 
666  bool operator ==(const InstanceNormalizationDescriptor& rhs) const
667  {
668  return m_Gamma == rhs.m_Gamma &&
669  m_Beta == rhs.m_Beta &&
670  m_Eps == rhs.m_Eps &&
671  m_DataLayout == rhs.m_DataLayout;
672  }
673 
674  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
675  float m_Gamma;
676  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
677  float m_Beta;
678  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
679  float m_Eps;
680  /// The data layout to be used (NCHW, NHWC).
681  DataLayout m_DataLayout;
682 };
683 
684 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
685 struct BatchToSpaceNdDescriptor : BaseDescriptor
686 {
687  BatchToSpaceNdDescriptor()
688  : m_BlockShape({1, 1})
689  , m_Crops({{0, 0}, {0, 0}})
690  , m_DataLayout(DataLayout::NCHW)
691  {}
692 
693  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
694  std::vector<std::pair<unsigned int, unsigned int>> crops)
695  : m_BlockShape(blockShape)
696  , m_Crops(crops)
697  , m_DataLayout(DataLayout::NCHW)
698  {}
699 
700  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
701  {
702  return m_BlockShape == rhs.m_BlockShape &&
703  m_Crops == rhs.m_Crops &&
704  m_DataLayout == rhs.m_DataLayout;
705  }
706 
707  /// Block shape values.
708  std::vector<unsigned int> m_BlockShape;
709  /// The values to crop from the input dimension.
710  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
711  /// The data layout to be used (NCHW, NHWC).
712  DataLayout m_DataLayout;
713 };
714 
715 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
716 struct FakeQuantizationDescriptor : BaseDescriptor
717 {
718  FakeQuantizationDescriptor()
719  : m_Min(-6.0f)
720  , m_Max(6.0f)
721  {}
722 
723  bool operator ==(const FakeQuantizationDescriptor& rhs) const
724  {
725  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
726  }
727 
728  /// Minimum value.
729  float m_Min;
730  /// Maximum value.
731  float m_Max;
732 };
733 
734 /// A FillDescriptor for the FillLayer.
735 struct FillDescriptor : BaseDescriptor
736 {
737  FillDescriptor()
738  : m_Value(0)
739  {}
740 
741  FillDescriptor(const float& value)
742  : m_Value(value)
743  {}
744 
745  bool operator ==(const FillDescriptor& rhs) const
746  {
747  return m_Value == rhs.m_Value;
748  }
749 
750  float m_Value;
751 };
752 
753 /// A GatherDescriptor for the GatherLayer.
754 struct GatherDescriptor : BaseDescriptor
755 {
756  GatherDescriptor()
757  : m_Axis(0)
758  {}
759 
760  GatherDescriptor(int32_t axis)
761  : m_Axis(axis)
762  {}
763 
764  bool operator ==(const GatherDescriptor& rhs) const
765  {
766  return m_Axis == rhs.m_Axis;
767  }
768 
769  /// The axis in params to gather indices from
770  int32_t m_Axis;
771 };
772 
773 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
774 struct ResizeBilinearDescriptor : BaseDescriptor
775 {
776  ResizeBilinearDescriptor()
777  : m_TargetWidth(0)
778  , m_TargetHeight(0)
779  , m_DataLayout(DataLayout::NCHW)
780  , m_AlignCorners(false)
781  , m_HalfPixelCenters(false)
782  {}
783 
784  bool operator ==(const ResizeBilinearDescriptor& rhs) const
785  {
786  return m_TargetWidth == rhs.m_TargetWidth &&
787  m_TargetHeight == rhs.m_TargetHeight &&
788  m_DataLayout == rhs.m_DataLayout &&
789  m_AlignCorners == rhs.m_AlignCorners &&
790  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
791  }
792 
793  /// Target width value.
794  uint32_t m_TargetWidth;
795  /// Target height value.
796  uint32_t m_TargetHeight;
797  /// The data layout to be used (NCHW, NHWC).
798  DataLayout m_DataLayout;
799  /// Aligned corners.
800  bool m_AlignCorners;
801  /// Half Pixel Centers.
802  bool m_HalfPixelCenters;
803 };
804 
805 /// A ResizeDescriptor for the ResizeLayer.
806 struct ResizeDescriptor : BaseDescriptor
807 {
808  ResizeDescriptor()
809  : m_TargetWidth(0)
810  , m_TargetHeight(0)
811  , m_Method(ResizeMethod::NearestNeighbor)
812  , m_DataLayout(DataLayout::NCHW)
813  , m_AlignCorners(false)
814  , m_HalfPixelCenters(false)
815  {}
816 
817  bool operator ==(const ResizeDescriptor& rhs) const
818  {
819  return m_TargetWidth == rhs.m_TargetWidth &&
820  m_TargetHeight == rhs.m_TargetHeight &&
821  m_Method == rhs.m_Method &&
822  m_DataLayout == rhs.m_DataLayout &&
823  m_AlignCorners == rhs.m_AlignCorners &&
824  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
825  }
826 
827  /// Target width value.
828  uint32_t m_TargetWidth;
829  /// Target height value.
830  uint32_t m_TargetHeight;
831  /// The Interpolation method to use
832  /// (Bilinear, NearestNeighbor).
833  ResizeMethod m_Method;
834  /// The data layout to be used (NCHW, NHWC).
835  DataLayout m_DataLayout;
836  /// Aligned corners.
837  bool m_AlignCorners;
838  /// Half Pixel Centers.
839  bool m_HalfPixelCenters;
840 };
841 
842 
843 /// A ReshapeDescriptor for the ReshapeLayer.
844 struct ReshapeDescriptor : BaseDescriptor
845 {
846  ReshapeDescriptor()
847  : m_TargetShape()
848  {}
849 
850  ReshapeDescriptor(const TensorShape& shape)
851  : m_TargetShape(shape)
852  {}
853 
854  bool operator ==(const ReshapeDescriptor& rhs) const
855  {
856  return m_TargetShape == rhs.m_TargetShape;
857  }
858 
859  /// Target shape value.
860  TensorShape m_TargetShape;
861 };
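// Usage sketch (editorial addition): flattening a {1, 3, 224, 224} tensor into {1, 150528},
// assuming TensorShape (Tensor.hpp) accepts an initializer list of dimension sizes.
//
//     armnn::ReshapeDescriptor reshape(armnn::TensorShape({1, 150528}));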
862 
863 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
864 struct SpaceToBatchNdDescriptor : BaseDescriptor
865 {
866  SpaceToBatchNdDescriptor()
867  : m_BlockShape({1, 1})
868  , m_PadList({{0, 0}, {0, 0}})
869  , m_DataLayout(DataLayout::NCHW)
870  {}
871 
872  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
873  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
874  : m_BlockShape(blockShape)
875  , m_PadList(padList)
876  , m_DataLayout(DataLayout::NCHW)
877  {}
878 
879  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
880  {
881  return m_BlockShape == rhs.m_BlockShape &&
882  m_PadList == rhs.m_PadList &&
883  m_DataLayout == rhs.m_DataLayout;
884  }
885 
886  /// Block shape value.
887  std::vector<unsigned int> m_BlockShape;
888  /// @brief Specifies the padding values for the input dimension:
889  /// heightPad{top, bottom} widthPad{left, right}.
890  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
891  /// The data layout to be used (NCHW, NHWC).
892  DataLayout m_DataLayout;
893 };
894 
895 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer.
896 struct SpaceToDepthDescriptor : BaseDescriptor
897 {
898  SpaceToDepthDescriptor()
899  : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
900  {}
901 
902  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
903  : m_BlockSize(blockSize)
904  , m_DataLayout(dataLayout)
905  {}
906 
907  bool operator ==(const SpaceToDepthDescriptor& rhs) const
908  {
909  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
910  }
911 
912  /// Scalar specifying the input block size. It must be >= 1.
913  unsigned int m_BlockSize;
914 
915  /// The data layout to be used (NCHW, NHWC).
916  DataLayout m_DataLayout;
917 };
918 
919 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer.
920 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
921 
922 /// An LstmDescriptor for the LstmLayer.
923 struct LstmDescriptor : BaseDescriptor
924 {
925  LstmDescriptor()
926  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
927  , m_ClippingThresCell(0.0)
928  , m_ClippingThresProj(0.0)
929  , m_CifgEnabled(true)
930  , m_PeepholeEnabled(false)
931  , m_ProjectionEnabled(false)
932  , m_LayerNormEnabled(false)
933  , m_TimeMajor(false)
934  {}
935 
936  bool operator ==(const LstmDescriptor& rhs) const
937  {
938  return m_ActivationFunc == rhs.m_ActivationFunc &&
939  m_ClippingThresCell == rhs.m_ClippingThresCell &&
940  m_ClippingThresProj == rhs.m_ClippingThresProj &&
941  m_CifgEnabled == rhs.m_CifgEnabled &&
942  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
943  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
944  m_TimeMajor == rhs.m_TimeMajor;
945  }
946 
947  /// @brief The activation function to use.
948  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
949  uint32_t m_ActivationFunc;
950  /// Clipping threshold value for the cell state.
951  float m_ClippingThresCell;
952  /// Clipping threshold value for the projection.
953  float m_ClippingThresProj;
954  /// Enable/disable CIFG (coupled input & forget gate).
955  bool m_CifgEnabled;
956  /// Enable/disable peephole.
957  bool m_PeepholeEnabled;
958  /// Enable/disable the projection layer.
959  bool m_ProjectionEnabled;
960  /// Enable/disable layer normalization.
961  bool m_LayerNormEnabled;
962  /// Enable/disable time major.
963  bool m_TimeMajor;
964 };
965 
966 using UnidirectionalSequenceLstmDescriptor = LstmDescriptor;
967 
968 /// A MeanDescriptor for the MeanLayer.
969 struct MeanDescriptor : BaseDescriptor
970 {
971  MeanDescriptor()
972  : m_Axis()
973  , m_KeepDims(false)
974  {}
975 
976  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
977  : m_Axis(axis)
978  , m_KeepDims(keepDims)
979  {}
980 
981  bool operator ==(const MeanDescriptor& rhs) const
982  {
983  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
984  }
985 
986  /// Values for the dimensions to reduce.
987  std::vector<unsigned int> m_Axis;
988  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
989  bool m_KeepDims;
990 };
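// Usage sketch (editorial addition): averaging over the spatial dimensions (1 and 2) of an
// NHWC tensor while keeping them as size-1 dimensions; the axis values are illustrative.
//
//     armnn::MeanDescriptor mean({1, 2}, true);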
991 
992 /// A PadDescriptor for the PadLayer.
993 struct PadDescriptor : BaseDescriptor
994 {
995  PadDescriptor() : m_PadValue(0)
996  {}
997 
998  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
999  : m_PadList(padList)
1000  , m_PadValue(padValue)
1001  {}
1002 
1003  bool operator ==(const PadDescriptor& rhs) const
1004  {
1005  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue;
1006  }
1007 
1008  /// @brief Specifies the padding for input dimension.
1009  /// First is the number of values to add before the tensor in the dimension.
1010  /// Second is the number of values to add after the tensor in the dimension.
1011  /// The number of pairs should match the number of dimensions in the input tensor.
1012  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1013 
1014  /// Optional value to use for padding, defaults to 0
1015  float m_PadValue;
1016 };
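// Usage sketch (editorial addition): zero-padding the height and width of a 4D NCHW tensor by
// one element on each side; the pad list needs one {before, after} pair per input dimension.
//
//     armnn::PadDescriptor pad({{0, 0}, {0, 0}, {1, 1}, {1, 1}}, 0.0f);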
1017 
1018 /// A SliceDescriptor for the SliceLayer.
1019 struct SliceDescriptor : BaseDescriptor
1020 {
1021  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1022  : m_Begin(begin)
1023  , m_Size(size)
1024  {}
1025 
1026  SliceDescriptor() : SliceDescriptor({}, {})
1027  {}
1028 
1029  bool operator ==(const SliceDescriptor& rhs) const
1030  {
1031  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1032  }
1033 
1034  /// Beginning indices of the slice in each dimension.
1035  std::vector<unsigned int> m_Begin;
1036 
1037  /// Size of the slice in each dimension.
1038  std::vector<unsigned int> m_Size;
1039 };
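// Usage sketch (editorial addition): slicing a 1x2x2x2 sub-tensor starting at offset
// {0, 1, 0, 0}; m_Begin and m_Size need one entry per input dimension.
//
//     armnn::SliceDescriptor slice({0, 1, 0, 0}, {1, 2, 2, 2});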
1040 
1041 /// A StackDescriptor for the StackLayer.
1042 struct StackDescriptor : BaseDescriptor
1043 {
1044  StackDescriptor()
1045  : m_Axis(0)
1046  , m_NumInputs(0)
1047  , m_InputShape()
1048  {}
1049 
1050  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1051  : m_Axis(axis)
1052  , m_NumInputs(numInputs)
1053  , m_InputShape(inputShape)
1054  {}
1055 
1056  bool operator ==(const StackDescriptor& rhs) const
1057  {
1058  return m_Axis == rhs.m_Axis &&
1059  m_NumInputs == rhs.m_NumInputs &&
1060  m_InputShape == rhs.m_InputShape;
1061  }
1062 
1063  /// 0-based axis along which to stack the input tensors.
1064  uint32_t m_Axis;
1065  /// Number of input tensors.
1066  uint32_t m_NumInputs;
1067  /// Required shape of all input tensors.
1068  TensorShape m_InputShape;
1069 };
1070 
1071 /// A StandInDescriptor for the StandIn layer.
1072 struct StandInDescriptor : BaseDescriptor
1073 {
1074  StandInDescriptor() {};
1075 
1076  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1077  : m_NumInputs(numInputs)
1078  , m_NumOutputs(numOutputs)
1079  {}
1080 
1081  bool operator ==(const StandInDescriptor& rhs) const
1082  {
1083  return m_NumInputs == rhs.m_NumInputs &&
1084  m_NumOutputs == rhs.m_NumOutputs;
1085  }
1086 
1087  /// Number of input tensors
1088  uint32_t m_NumInputs = 0;
1089  /// Number of output tensors
1090  uint32_t m_NumOutputs = 0;
1091 };
1092 
1093 /// A StridedSliceDescriptor for the StridedSliceLayer.
1094 struct StridedSliceDescriptor : BaseDescriptor
1095 {
1096  StridedSliceDescriptor(const std::vector<int>& begin,
1097  const std::vector<int>& end,
1098  const std::vector<int>& stride)
1099  : m_Begin(begin)
1100  , m_End(end)
1101  , m_Stride(stride)
1102  , m_BeginMask(0)
1103  , m_EndMask(0)
1104  , m_ShrinkAxisMask(0)
1105  , m_EllipsisMask(0)
1106  , m_NewAxisMask(0)
1107  , m_DataLayout(DataLayout::NCHW)
1108  {}
1109 
1110  StridedSliceDescriptor()
1111  : StridedSliceDescriptor({}, {}, {})
1112  {}
1113 
1114  bool operator ==(const StridedSliceDescriptor& rhs) const
1115  {
1116  return m_Begin == rhs.m_Begin &&
1117  m_End == rhs.m_End &&
1118  m_Stride == rhs.m_Stride &&
1119  m_BeginMask == rhs.m_BeginMask &&
1120  m_EndMask == rhs.m_EndMask &&
1121  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1122  m_EllipsisMask == rhs.m_EllipsisMask &&
1123  m_NewAxisMask == rhs.m_NewAxisMask &&
1124  m_DataLayout == rhs.m_DataLayout;
1125  }
1126 
1127  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1128  int GetStopForAxis(const TensorShape& inputShape,
1129  unsigned int axis,
1130  int startForAxis) const;
1131 
1132  /// Begin values for the input that will be sliced.
1133  std::vector<int> m_Begin;
1134  /// End values for the input that will be sliced.
1135  std::vector<int> m_End;
1136  /// Stride values for the input that will be sliced.
1137  std::vector<int> m_Stride;
1138 
1139  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1140  /// range is used for the dimension.
1141  int32_t m_BeginMask;
1142  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1143  /// is used for the dimension.
1144  int32_t m_EndMask;
1145  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1146  int32_t m_ShrinkAxisMask;
1147  /// Ellipsis mask value.
1148  int32_t m_EllipsisMask;
1149  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1150  /// a new 1 dimension is inserted to this location of the output tensor.
1151  int32_t m_NewAxisMask;
1152 
1153  /// The data layout to be used (NCHW, NHWC).
1154  DataLayout m_DataLayout;
1155 };
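// Usage sketch (editorial addition): taking every second element along dimension 1 of a 4D
// input. The masks appear to be per-dimension bit flags (bit n refers to dimension n), so
// setting all four bits of m_BeginMask/m_EndMask makes the begin/end values irrelevant and
// only the strides apply; treat this as an illustrative sketch rather than a reference.
//
//     armnn::StridedSliceDescriptor stridedSlice({0, 0, 0, 0},   // begin (masked out below)
//                                                {0, 0, 0, 0},   // end (masked out below)
//                                                {1, 2, 1, 1});  // strides
//     stridedSlice.m_BeginMask = 0xF;
//     stridedSlice.m_EndMask   = 0xF;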
1156 
1157 /// A PreCompiledDescriptor for the PreCompiledLayer.
1158 struct PreCompiledDescriptor : BaseDescriptor
1159 {
1160  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1161  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1162  {}
1163 
1164  ~PreCompiledDescriptor() = default;
1165 
1166  unsigned int m_NumInputSlots;
1167  unsigned int m_NumOutputSlots;
1168 };
1169 
1170 /// A QLstmDescriptor for the QLstmLayer.
1171 struct QLstmDescriptor : BaseDescriptor
1172 {
1173  QLstmDescriptor()
1174  : m_CellClip(0.0)
1175  , m_ProjectionClip(0.0)
1176  , m_CifgEnabled(true)
1177  , m_PeepholeEnabled(false)
1178  , m_ProjectionEnabled(false)
1179  , m_LayerNormEnabled(false)
1180  , m_InputIntermediateScale(0.0)
1181  , m_ForgetIntermediateScale(0.0)
1182  , m_CellIntermediateScale(0.0)
1183  , m_OutputIntermediateScale(0.0)
1184  , m_HiddenStateZeroPoint(0)
1185  , m_HiddenStateScale(0.0)
1186  {}
1187 
1188  bool operator ==(const QLstmDescriptor& rhs) const
1189  {
1190  return m_CellClip == rhs.m_CellClip &&
1191  m_ProjectionClip == rhs.m_ProjectionClip &&
1192  m_CifgEnabled == rhs.m_CifgEnabled &&
1193  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1194  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1195  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1196  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1197  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1198  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1199  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1200  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1201  m_HiddenStateScale == rhs.m_HiddenStateScale;
1202  }
1203 
1204  /// Clipping threshold value for the cell state
1205  float m_CellClip;
1206  /// Clipping threshold value for the projection.
1207  float m_ProjectionClip;
1208  /// Enable/disable CIFG (coupled input & forget gate).
1209  bool m_CifgEnabled;
1210  /// Enable/disable peephole.
1211  bool m_PeepholeEnabled;
1212  /// Enable/disable the projection layer.
1213  bool m_ProjectionEnabled;
1214  /// Enable/disable layer normalization.
1215  bool m_LayerNormEnabled;
1216  /// Input intermediate quantization scale.
1217  float m_InputIntermediateScale;
1218  /// Forget intermediate quantization scale.
1219  float m_ForgetIntermediateScale;
1220  /// Cell intermediate quantization scale.
1221  float m_CellIntermediateScale;
1222  /// Output intermediate quantization scale.
1223  float m_OutputIntermediateScale;
1224  /// Hidden State zero point.
1225  int32_t m_HiddenStateZeroPoint;
1226  /// Hidden State quantization scale.
1227  float m_HiddenStateScale;
1228 };
1229 
1230 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1231 struct TransposeConvolution2dDescriptor : BaseDescriptor
1232 {
1233  TransposeConvolution2dDescriptor() :
1234  m_PadLeft(0),
1235  m_PadRight(0),
1236  m_PadTop(0),
1237  m_PadBottom(0),
1238  m_StrideX(0),
1239  m_StrideY(0),
1240  m_BiasEnabled(false),
1241  m_DataLayout(DataLayout::NCHW),
1242  m_OutputShapeEnabled(false)
1243  {}
1244 
1245  bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
1246  {
1247  return m_PadLeft == rhs.m_PadLeft &&
1248  m_PadRight == rhs.m_PadRight &&
1249  m_PadTop == rhs.m_PadTop &&
1250  m_PadBottom == rhs.m_PadBottom &&
1251  m_StrideX == rhs.m_StrideX &&
1252  m_StrideY == rhs.m_StrideY &&
1253  m_BiasEnabled == rhs.m_BiasEnabled &&
1254  m_DataLayout == rhs.m_DataLayout &&
1255  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1256  m_OutputShape == rhs.m_OutputShape;
1257  }
1258 
1259  /// Padding left value in the width dimension.
1260  uint32_t m_PadLeft;
1261  /// Padding right value in the width dimension.
1262  uint32_t m_PadRight;
1263  /// Padding top value in the height dimension.
1264  uint32_t m_PadTop;
1265  /// Padding bottom value in the height dimension.
1266  uint32_t m_PadBottom;
1267  /// Stride value when proceeding through input for the width dimension.
1268  uint32_t m_StrideX;
1269  /// Stride value when proceeding through input for the height dimension.
1270  uint32_t m_StrideY;
1271  /// Enable/disable bias.
1272  bool m_BiasEnabled;
1273  /// The data layout to be used (NCHW, NHWC).
1274  DataLayout m_DataLayout;
1275  /// Output shape if it has been specified.
1276  bool m_OutputShapeEnabled;
1277  std::vector<unsigned int> m_OutputShape;
1278 };
1279 
1280 /// A TransposeDescriptor for the TransposeLayer.
1281 struct TransposeDescriptor : BaseDescriptor
1282 {
1283  TransposeDescriptor()
1284  : m_DimMappings{}
1285  {}
1286 
1287  TransposeDescriptor(const PermutationVector& dimMappings)
1288  : m_DimMappings(dimMappings)
1289  {}
1290 
1291  bool operator ==(const TransposeDescriptor &rhs) const
1292  {
1293  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1294  }
1295 
1296  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1297  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1298  PermutationVector m_DimMappings;
1299 };
1300 
1301 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer.
1302 struct LogicalBinaryDescriptor : BaseDescriptor
1303 {
1304  LogicalBinaryDescriptor()
1305  : LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd)
1306  {}
1307 
1308  LogicalBinaryDescriptor(LogicalBinaryOperation operation)
1309  : m_Operation(operation)
1310  {}
1311 
1312  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1313  {
1314  return m_Operation == rhs.m_Operation;
1315  }
1316 
1317  /// Specifies the logical operation to execute.
1318  LogicalBinaryOperation m_Operation;
1319 };
1320 
1321 /// A ReduceDescriptor for the REDUCE operators.
1322 struct ReduceDescriptor : BaseDescriptor
1323 {
1324  ReduceDescriptor()
1325  : m_KeepDims(false)
1326  , m_vAxis()
1327  , m_ReduceOperation(ReduceOperation::Sum)
1328  {}
1329 
1330  bool operator ==(const ReduceDescriptor& rhs) const
1331  {
1332  return m_KeepDims == rhs.m_KeepDims &&
1333  m_vAxis == rhs.m_vAxis &&
1334  m_ReduceOperation == rhs.m_ReduceOperation;
1335  }
1336 
1337  /// If true, then the output shape has no change.
1338  bool m_KeepDims;
1339  /// The indices of the dimensions to reduce.
1340  std::vector<uint32_t> m_vAxis;
1341  /// Specifies the reduction operation to execute.
1342  ReduceOperation m_ReduceOperation;
1343 };
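// Usage sketch (editorial addition): summing over dimension 2 of the input and dropping the
// reduced dimension; the axis value is illustrative.
//
//     armnn::ReduceDescriptor reduce;
//     reduce.m_vAxis           = {2};
//     reduce.m_KeepDims        = false;
//     reduce.m_ReduceOperation = armnn::ReduceOperation::Sum;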
1344 
1345 } // namespace armnn