ArmNN 20.08
Descriptors.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "Deprecated.hpp"
#include "DescriptorsFwd.hpp"

#include <cstdint>
#include <initializer_list>

#include "Tensor.hpp"
#include "Types.hpp"

namespace armnn
{

/// An ActivationDescriptor for the ActivationLayer.
struct ActivationDescriptor
{
    ActivationDescriptor()
        : m_Function(ActivationFunction::Sigmoid)
        , m_A(0)
        , m_B(0)
    {}

    ActivationDescriptor(armnn::ActivationFunction activation,
                         float a = 0,
                         float b = 0)
        : m_Function(activation)
        , m_A(a)
        , m_B(b)
    {}

    bool operator ==(const ActivationDescriptor &rhs) const
    {
        return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
    }

    /// @brief The activation function to use
    /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
    ActivationFunction m_Function;
    /// Alpha / upper bound value used by the activation functions (BoundedReLu, Linear, TanH, Elu).
    float m_A;
    /// Beta / lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
    float m_B;
};
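
/// Example (illustrative sketch; the include path and the value 6.0f are assumptions, not part of this header):
/// a ReLU6-style activation expressed as BoundedReLu, where m_A is the upper bound and m_B the lower bound.
/// @code
/// #include <armnn/Descriptors.hpp>
///
/// armnn::ActivationDescriptor relu6(armnn::ActivationFunction::BoundedReLu,
///                                   6.0f,  // m_A: upper bound
///                                   0.0f); // m_B: lower bound
/// @endcode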

/// An ArgMinMaxDescriptor for ArgMinMaxLayer
struct ArgMinMaxDescriptor
{
    ArgMinMaxDescriptor()
        : m_Function(ArgMinMaxFunction::Min)
        , m_Axis(-1)
    {}

    bool operator ==(const ArgMinMaxDescriptor &rhs) const
    {
        return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis;
    }

    /// Specify if the function is to find Min or Max.
    ArgMinMaxFunction m_Function;
    /// Axis to reduce across the input tensor.
    int m_Axis;
};

/// A ComparisonDescriptor for the ComparisonLayer
struct ComparisonDescriptor
{
    ComparisonDescriptor()
        : ComparisonDescriptor(ComparisonOperation::Equal)
    {}

    ComparisonDescriptor(ComparisonOperation operation)
        : m_Operation(operation)
    {}

    bool operator ==(const ComparisonDescriptor &rhs) const
    {
        return m_Operation == rhs.m_Operation;
    }

    /// Specifies the comparison operation to execute
    ComparisonOperation m_Operation;
};

/// An ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
struct ElementwiseUnaryDescriptor
{
    ElementwiseUnaryDescriptor()
        : ElementwiseUnaryDescriptor(UnaryOperation::Abs)
    {}

    ElementwiseUnaryDescriptor(UnaryOperation operation)
        : m_Operation(operation)
    {}

    bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
    {
        return m_Operation == rhs.m_Operation;
    }

    /// Specifies the elementwiseUnary operation to execute
    UnaryOperation m_Operation;
};

/// A PermuteDescriptor for the PermuteLayer.
struct PermuteDescriptor
{
    PermuteDescriptor()
        : m_DimMappings{}
    {}

    PermuteDescriptor(const PermutationVector& dimMappings)
        : m_DimMappings(dimMappings)
    {}

    bool operator ==(const PermuteDescriptor &rhs) const
    {
        return m_DimMappings.IsEqual(rhs.m_DimMappings);
    }

    /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
    /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
    PermutationVector m_DimMappings;
};
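
/// Example (illustrative sketch; the include path is an assumption): constructing a PermuteDescriptor from the
/// mapping vector used as the example in the comment above.
/// @code
/// #include <armnn/Descriptors.hpp>
///
/// armnn::PermutationVector mappings({0U, 3U, 1U, 2U});
/// armnn::PermuteDescriptor permuteDesc(mappings);
/// @endcode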

/// A SoftmaxDescriptor for the SoftmaxLayer.
struct SoftmaxDescriptor
{
    SoftmaxDescriptor()
        : m_Beta(1.0f)
        , m_Axis(-1)
    {}

    bool operator ==(const SoftmaxDescriptor& rhs) const
    {
        return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
    }

    /// Exponentiation value.
    float m_Beta;
    /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
    int m_Axis;
};

/// A LogSoftmaxDescriptor for the LogSoftmaxLayer
using LogSoftmaxDescriptor = SoftmaxDescriptor;

/// @brief An OriginsDescriptor for the ConcatLayer.
/// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
/// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
struct OriginsDescriptor
{
    OriginsDescriptor();
    OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
    OriginsDescriptor(const OriginsDescriptor& other);
    OriginsDescriptor(OriginsDescriptor&& other);

    ~OriginsDescriptor();

    OriginsDescriptor& operator=(OriginsDescriptor rhs);

    bool operator ==(const OriginsDescriptor& rhs) const;

    /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
    Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
    /// Get the number of views.
    uint32_t GetNumViews() const;
    /// Get the number of dimensions.
    uint32_t GetNumDimensions() const;
    /// Return the view origin at the given index.
    const uint32_t* GetViewOrigin(uint32_t idx) const;
    /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
    /// The number of views must match number of elements in the new ordering array.
    void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
    /// Swap the OriginsDescriptor values first and second.
    friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
    /// Set the concatenation axis value.
    void SetConcatAxis(unsigned int concatAxis);
    /// Get the concatenation axis value.
    unsigned int GetConcatAxis() const;

private:
    unsigned int m_ConcatAxis;
    uint32_t m_NumViews;
    uint32_t m_NumDimensions;
    uint32_t** m_ViewOrigins;
};

/// @brief A ViewsDescriptor for the SplitterLayer.
/// Descriptor to configure the splitting process. Number of views must be equal to the number of outputs, and
/// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
struct ViewsDescriptor
{
    ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
    ViewsDescriptor(const ViewsDescriptor& other);
    ViewsDescriptor();
    ViewsDescriptor(ViewsDescriptor&& other);

    ~ViewsDescriptor();

    ViewsDescriptor& operator=(ViewsDescriptor rhs);

    bool operator ==(const ViewsDescriptor& rhs) const;

    /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
    Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
    /// @brief Set the size of the views. The arguments are: view, dimension, value.
    /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
    /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
    Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);

    /// Get the number of views.
    uint32_t GetNumViews() const;
    /// Get the number of dimensions.
    uint32_t GetNumDimensions() const;
    /// Get the view origin at the given index.
    const uint32_t* GetViewOrigin(uint32_t idx) const;
    /// Get the view sizes at the given index.
    const uint32_t* GetViewSizes(uint32_t idx) const;
    /// Get the view origins.
    const OriginsDescriptor& GetOrigins() const;

    /// Swap the ViewsDescriptor values first and second.
    friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
private:
    OriginsDescriptor m_Origins;
    uint32_t** m_ViewSizes;
};
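
/// Example (illustrative sketch; the shapes and include path are assumptions): splitting a [1, 4, 2, 2] tensor
/// into two [1, 2, 2, 2] views along the channel dimension by setting each view's origin and size explicitly.
/// @code
/// #include <armnn/Descriptors.hpp>
///
/// armnn::ViewsDescriptor splitterDesc(/*numViews*/ 2, /*numDimensions*/ 4);
/// const uint32_t sizes[4] = {1, 2, 2, 2};
/// for (uint32_t d = 0; d < 4; ++d)
/// {
///     splitterDesc.SetViewOriginCoord(0, d, 0);                  // view 0 starts at the origin
///     splitterDesc.SetViewOriginCoord(1, d, d == 1 ? 2u : 0u);   // view 1 starts at channel 2
///     splitterDesc.SetViewSize(0, d, sizes[d]);
///     splitterDesc.SetViewSize(1, d, sizes[d]);
/// }
/// @endcode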

template <typename TensorShapeIt>
ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
                                                         TensorShapeIt last,
                                                         unsigned int concatenationDimension)
{
    return CreateDescriptorForConcatenation(first, last, concatenationDimension);
}

/// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
/// concatenation of a number of input tensors.
template <typename TensorShapeIt>
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
                                                   TensorShapeIt last,
                                                   unsigned int concatenationDimension)
{
    auto numInputs = std::distance(first, last);

    if (numInputs < 2)
    {
        throw InvalidArgumentException("Concatenation requires at least 2 inputs");
    }

    const auto& firstInputShape = *first;

    const unsigned int numDimensions = firstInputShape.GetNumDimensions();
    for (auto it = first + 1; it != last; ++it)
    {
        if (it->GetNumDimensions() != numDimensions)
        {
            throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
        }
    }

    if (concatenationDimension >= numDimensions)
    {
        throw InvalidArgumentException("concatenationDimension must be less than the number of dimensions.");
    }

    for (auto it = first; it != last; ++it)
    {
        for (unsigned int d = 0; d < numDimensions; ++d)
        {
            const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
            if (!dimSizeOk)
            {
                throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
                                               "except the concatenation dimension");
            }
        }
    }

    OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
    viewsDescriptor.SetConcatAxis(concatenationDimension);

    uint32_t viewIndex = 0u;
    uint32_t coordAlongConcatDim = 0u;
    for (auto it = first; it != last; ++it)
    {
        const auto& inputShape = *it;

        for (unsigned int i = 0; i < concatenationDimension; ++i)
        {
            viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
        }

        viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
        unsigned int dimSize = inputShape[concatenationDimension];
        coordAlongConcatDim += dimSize;

        for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
        {
            viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
        }

        ++viewIndex;
    }

    return viewsDescriptor;
}
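
/// Example (illustrative sketch; the shapes and include paths are assumptions): concatenating two 4-D tensors
/// along the channel dimension (axis 1). The second view ends up at channel offset 2, the size of the first input
/// along that axis.
/// @code
/// #include <armnn/Descriptors.hpp>
/// #include <armnn/Tensor.hpp>
/// #include <vector>
///
/// std::vector<armnn::TensorShape> shapes = { armnn::TensorShape({1, 2, 4, 4}),
///                                            armnn::TensorShape({1, 3, 4, 4}) };
/// armnn::OriginsDescriptor concatDesc =
///     armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);
/// @endcode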

/// A Pooling2dDescriptor for the Pooling2dLayer.
struct Pooling2dDescriptor
{
    Pooling2dDescriptor()
        : m_PoolType(PoolingAlgorithm::Max)
        , m_PadLeft(0)
        , m_PadRight(0)
        , m_PadTop(0)
        , m_PadBottom(0)
        , m_PoolWidth(0)
        , m_PoolHeight(0)
        , m_StrideX(0)
        , m_StrideY(0)
        , m_OutputShapeRounding(OutputShapeRounding::Floor)
        , m_PaddingMethod(PaddingMethod::Exclude)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const Pooling2dDescriptor& rhs) const
    {
        return m_PoolType == rhs.m_PoolType &&
               m_PadLeft == rhs.m_PadLeft &&
               m_PadRight == rhs.m_PadRight &&
               m_PadTop == rhs.m_PadTop &&
               m_PadBottom == rhs.m_PadBottom &&
               m_PoolWidth == rhs.m_PoolWidth &&
               m_PoolHeight == rhs.m_PoolHeight &&
               m_StrideX == rhs.m_StrideX &&
               m_StrideY == rhs.m_StrideY &&
               m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
               m_PaddingMethod == rhs.m_PaddingMethod &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// The pooling algorithm to use (Max, Average, L2).
    PoolingAlgorithm m_PoolType;
    /// Padding left value in the width dimension.
    uint32_t m_PadLeft;
    /// Padding right value in the width dimension.
    uint32_t m_PadRight;
    /// Padding top value in the height dimension.
    uint32_t m_PadTop;
    /// Padding bottom value in the height dimension.
    uint32_t m_PadBottom;
    /// Pooling width value.
    uint32_t m_PoolWidth;
    /// Pooling height value.
    uint32_t m_PoolHeight;
    /// Stride value when proceeding through input for the width dimension.
    uint32_t m_StrideX;
    /// Stride value when proceeding through input for the height dimension.
    uint32_t m_StrideY;
    /// The rounding method for the output shape. (Floor, Ceiling).
    OutputShapeRounding m_OutputShapeRounding;
    /// The padding method to be used. (Exclude, IgnoreValue).
    PaddingMethod m_PaddingMethod;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};
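
/// Example (illustrative sketch; the values and include path are assumptions): a 2x2 max pool with stride 2 and
/// no padding, operating on NHWC data.
/// @code
/// #include <armnn/Descriptors.hpp>
///
/// armnn::Pooling2dDescriptor poolDesc;
/// poolDesc.m_PoolType   = armnn::PoolingAlgorithm::Max;
/// poolDesc.m_PoolWidth  = 2;
/// poolDesc.m_PoolHeight = 2;
/// poolDesc.m_StrideX    = 2;
/// poolDesc.m_StrideY    = 2;
/// poolDesc.m_DataLayout = armnn::DataLayout::NHWC;
/// @endcode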

/// A FullyConnectedDescriptor for the FullyConnectedLayer.
struct FullyConnectedDescriptor
{
    FullyConnectedDescriptor()
        : m_BiasEnabled(false)
        , m_TransposeWeightMatrix(false)
    {}

    bool operator ==(const FullyConnectedDescriptor& rhs) const
    {
        return m_BiasEnabled == rhs.m_BiasEnabled && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix;
    }

    /// Enable/disable bias.
    bool m_BiasEnabled;
    /// Enable/disable transpose weight matrix.
    bool m_TransposeWeightMatrix;
};

/// A Convolution2dDescriptor for the Convolution2dLayer.
struct Convolution2dDescriptor
{
    Convolution2dDescriptor()
        : m_PadLeft(0)
        , m_PadRight(0)
        , m_PadTop(0)
        , m_PadBottom(0)
        , m_StrideX(0)
        , m_StrideY(0)
        , m_DilationX(1)
        , m_DilationY(1)
        , m_BiasEnabled(false)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const Convolution2dDescriptor& rhs) const
    {
        return m_PadLeft == rhs.m_PadLeft &&
               m_PadRight == rhs.m_PadRight &&
               m_PadTop == rhs.m_PadTop &&
               m_PadBottom == rhs.m_PadBottom &&
               m_StrideX == rhs.m_StrideX &&
               m_StrideY == rhs.m_StrideY &&
               m_DilationX == rhs.m_DilationX &&
               m_DilationY == rhs.m_DilationY &&
               m_BiasEnabled == rhs.m_BiasEnabled &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// Padding left value in the width dimension.
    uint32_t m_PadLeft;
    /// Padding right value in the width dimension.
    uint32_t m_PadRight;
    /// Padding top value in the height dimension.
    uint32_t m_PadTop;
    /// Padding bottom value in the height dimension.
    uint32_t m_PadBottom;
    /// Stride value when proceeding through input for the width dimension.
    uint32_t m_StrideX;
    /// Stride value when proceeding through input for the height dimension.
    uint32_t m_StrideY;
    /// Dilation along x axis.
    uint32_t m_DilationX;
    /// Dilation along y axis.
    uint32_t m_DilationY;
    /// Enable/disable bias.
    bool m_BiasEnabled;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};
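
/// Example (illustrative sketch; the values and include path are assumptions): a 3x3 convolution with stride 1,
/// symmetric padding of 1 and a bias, on NHWC data.
/// @code
/// #include <armnn/Descriptors.hpp>
///
/// armnn::Convolution2dDescriptor convDesc;
/// convDesc.m_PadLeft     = 1;
/// convDesc.m_PadRight    = 1;
/// convDesc.m_PadTop      = 1;
/// convDesc.m_PadBottom   = 1;
/// convDesc.m_StrideX     = 1;
/// convDesc.m_StrideY     = 1;
/// convDesc.m_BiasEnabled = true;
/// convDesc.m_DataLayout  = armnn::DataLayout::NHWC;
/// @endcode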

/// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
struct DepthwiseConvolution2dDescriptor
{
    DepthwiseConvolution2dDescriptor()
        : m_PadLeft(0)
        , m_PadRight(0)
        , m_PadTop(0)
        , m_PadBottom(0)
        , m_StrideX(0)
        , m_StrideY(0)
        , m_DilationX(1)
        , m_DilationY(1)
        , m_BiasEnabled(false)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
    {
        return m_PadLeft == rhs.m_PadLeft &&
               m_PadRight == rhs.m_PadRight &&
               m_PadTop == rhs.m_PadTop &&
               m_PadBottom == rhs.m_PadBottom &&
               m_StrideX == rhs.m_StrideX &&
               m_StrideY == rhs.m_StrideY &&
               m_DilationX == rhs.m_DilationX &&
               m_DilationY == rhs.m_DilationY &&
               m_BiasEnabled == rhs.m_BiasEnabled &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// Padding left value in the width dimension.
    uint32_t m_PadLeft;
    /// Padding right value in the width dimension.
    uint32_t m_PadRight;
    /// Padding top value in the height dimension.
    uint32_t m_PadTop;
    /// Padding bottom value in the height dimension.
    uint32_t m_PadBottom;
    /// Stride value when proceeding through input for the width dimension.
    uint32_t m_StrideX;
    /// Stride value when proceeding through input for the height dimension.
    uint32_t m_StrideY;
    /// Dilation factor value for width dimension.
    uint32_t m_DilationX;
    /// Dilation factor value for height dimension.
    uint32_t m_DilationY;
    /// Enable/disable bias.
    bool m_BiasEnabled;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

struct DetectionPostProcessDescriptor
{
    DetectionPostProcessDescriptor()
        : m_MaxDetections(0)
        , m_MaxClassesPerDetection(1)
        , m_DetectionsPerClass(1)
        , m_NmsScoreThreshold(0)
        , m_NmsIouThreshold(0)
        , m_NumClasses(0)
        , m_UseRegularNms(false)
        , m_ScaleX(0)
        , m_ScaleY(0)
        , m_ScaleW(0)
        , m_ScaleH(0)
    {}

    bool operator ==(const DetectionPostProcessDescriptor& rhs) const
    {
        return m_MaxDetections == rhs.m_MaxDetections &&
               m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
               m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
               m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
               m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
               m_NumClasses == rhs.m_NumClasses &&
               m_UseRegularNms == rhs.m_UseRegularNms &&
               m_ScaleX == rhs.m_ScaleX &&
               m_ScaleY == rhs.m_ScaleY &&
               m_ScaleW == rhs.m_ScaleW &&
               m_ScaleH == rhs.m_ScaleH;
    }

    /// Maximum numbers of detections.
    uint32_t m_MaxDetections;
    /// Maximum numbers of classes per detection, used in Fast NMS.
    uint32_t m_MaxClassesPerDetection;
    /// Detections per classes, used in Regular NMS.
    uint32_t m_DetectionsPerClass;
    /// NMS score threshold.
    float m_NmsScoreThreshold;
    /// Intersection over union threshold.
    float m_NmsIouThreshold;
    /// Number of classes.
    uint32_t m_NumClasses;
    /// Use Regular NMS.
    bool m_UseRegularNms;
    /// Center size encoding scale x.
    float m_ScaleX;
    /// Center size encoding scale y.
    float m_ScaleY;
    /// Center size encoding scale weight.
    float m_ScaleW;
    /// Center size encoding scale height.
    float m_ScaleH;
};

/// A NormalizationDescriptor for the NormalizationLayer.
struct NormalizationDescriptor
{
    NormalizationDescriptor()
        : m_NormChannelType(NormalizationAlgorithmChannel::Across)
        , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
        , m_NormSize(0)
        , m_Alpha(0.f)
        , m_Beta(0.f)
        , m_K(0.f)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const NormalizationDescriptor& rhs) const
    {
        return m_NormChannelType == rhs.m_NormChannelType &&
               m_NormMethodType == rhs.m_NormMethodType &&
               m_NormSize == rhs.m_NormSize &&
               m_Alpha == rhs.m_Alpha &&
               m_Beta == rhs.m_Beta &&
               m_K == rhs.m_K &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// Normalization channel algorithm to use (Across, Within).
    NormalizationAlgorithmChannel m_NormChannelType;
    /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
    NormalizationAlgorithmMethod m_NormMethodType;
    /// Depth radius value.
    uint32_t m_NormSize;
    /// Alpha value for the normalization equation.
    float m_Alpha;
    /// Beta value for the normalization equation.
    float m_Beta;
    /// Kappa value used for the across channel normalization equation.
    float m_K;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

/// An L2NormalizationDescriptor for the L2NormalizationLayer.
struct L2NormalizationDescriptor
{
    L2NormalizationDescriptor()
        : m_Eps(1e-12f)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const L2NormalizationDescriptor& rhs) const
    {
        return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
    }

    /// Used to avoid dividing by zero.
    float m_Eps;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

/// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
struct BatchNormalizationDescriptor
{
    BatchNormalizationDescriptor()
        : m_Eps(0.0001f)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const BatchNormalizationDescriptor& rhs) const
    {
        return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
    }

    /// Value to add to the variance. Used to avoid dividing by zero.
    float m_Eps;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

/// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
struct InstanceNormalizationDescriptor
{
    InstanceNormalizationDescriptor()
        : m_Gamma(1.0f)
        , m_Beta(0.0f)
        , m_Eps(1e-12f)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const InstanceNormalizationDescriptor& rhs) const
    {
        return m_Gamma == rhs.m_Gamma &&
               m_Beta == rhs.m_Beta &&
               m_Eps == rhs.m_Eps &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
    float m_Gamma;
    /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
    float m_Beta;
    /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
    float m_Eps;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

/// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
struct BatchToSpaceNdDescriptor
{
    BatchToSpaceNdDescriptor()
        : m_BlockShape({1, 1})
        , m_Crops({{0, 0}, {0, 0}})
        , m_DataLayout(DataLayout::NCHW)
    {}

    BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
                             std::vector<std::pair<unsigned int, unsigned int>> crops)
        : m_BlockShape(blockShape)
        , m_Crops(crops)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
    {
        return m_BlockShape == rhs.m_BlockShape &&
               m_Crops == rhs.m_Crops &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// Block shape values.
    std::vector<unsigned int> m_BlockShape;
    /// The values to crop from the input dimension.
    std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

/// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
struct FakeQuantizationDescriptor
{
    FakeQuantizationDescriptor()
        : m_Min(-6.0f)
        , m_Max(6.0f)
    {}

    bool operator ==(const FakeQuantizationDescriptor& rhs) const
    {
        return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
    }

    /// Minimum value.
    float m_Min;
    /// Maximum value.
    float m_Max;
};

/// A FillDescriptor for the FillLayer
struct FillDescriptor
{
    FillDescriptor()
        : m_Value(0)
    {}

    FillDescriptor(const float& value)
        : m_Value(value)
    {}

    bool operator ==(const FillDescriptor& rhs) const
    {
        return m_Value == rhs.m_Value;
    }

    float m_Value;
};

/// A GatherDescriptor for the GatherLayer.
struct GatherDescriptor
{
    GatherDescriptor()
        : m_Axis(0)
    {}

    GatherDescriptor(int32_t axis)
        : m_Axis(axis)
    {}

    bool operator ==(const GatherDescriptor& rhs) const
    {
        return m_Axis == rhs.m_Axis;
    }

    /// The axis in params to gather indices from
    int32_t m_Axis;
};

/// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
struct ResizeBilinearDescriptor
{
    ResizeBilinearDescriptor()
        : m_TargetWidth(0)
        , m_TargetHeight(0)
        , m_DataLayout(DataLayout::NCHW)
        , m_AlignCorners(false)
        , m_HalfPixelCenters(false)
    {}

    /// Target width value.
    uint32_t m_TargetWidth;
    /// Target height value.
    uint32_t m_TargetHeight;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
    /// Aligned corners.
    bool m_AlignCorners;
    /// Half Pixel Centers.
    bool m_HalfPixelCenters;
};

/// A ResizeDescriptor for the ResizeLayer.
struct ResizeDescriptor
{
    ResizeDescriptor()
        : m_TargetWidth(0)
        , m_TargetHeight(0)
        , m_Method(ResizeMethod::NearestNeighbor)
        , m_DataLayout(DataLayout::NCHW)
        , m_AlignCorners(false)
        , m_HalfPixelCenters(false)
    {}

    bool operator ==(const ResizeDescriptor& rhs) const
    {
        return m_TargetWidth == rhs.m_TargetWidth &&
               m_TargetHeight == rhs.m_TargetHeight &&
               m_Method == rhs.m_Method &&
               m_DataLayout == rhs.m_DataLayout &&
               m_AlignCorners == rhs.m_AlignCorners &&
               m_HalfPixelCenters == rhs.m_HalfPixelCenters;
    }

    /// Target width value.
    uint32_t m_TargetWidth;
    /// Target height value.
    uint32_t m_TargetHeight;
    /// The Interpolation method to use
    /// (Bilinear, NearestNeighbor).
    ResizeMethod m_Method;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
    /// Aligned corners.
    bool m_AlignCorners;
    /// Half Pixel Centers.
    bool m_HalfPixelCenters;
};

/// A ReshapeDescriptor for the ReshapeLayer.
struct ReshapeDescriptor
{
    ReshapeDescriptor()
        : m_TargetShape()
    {}

    ReshapeDescriptor(const TensorShape& shape)
        : m_TargetShape(shape)
    {}

    bool operator ==(const ReshapeDescriptor& rhs) const
    {
        return m_TargetShape == rhs.m_TargetShape;
    }

    /// Target shape value.
    TensorShape m_TargetShape;
};

/// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
struct SpaceToBatchNdDescriptor
{
    SpaceToBatchNdDescriptor()
        : m_BlockShape({1, 1})
        , m_PadList({{0, 0}, {0, 0}})
        , m_DataLayout(DataLayout::NCHW)
    {}

    SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
                             const std::vector<std::pair<unsigned int, unsigned int>>& padList)
        : m_BlockShape(blockShape)
        , m_PadList(padList)
        , m_DataLayout(DataLayout::NCHW)
    {}

    bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
    {
        return m_BlockShape == rhs.m_BlockShape &&
               m_PadList == rhs.m_PadList &&
               m_DataLayout == rhs.m_DataLayout;
    }

    /// Block shape value.
    std::vector<unsigned int> m_BlockShape;
    /// @brief Specifies the padding values for the input dimension:
    /// heightPad{top, bottom} widthPad{left, right}.
    std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

/// A SpaceToDepthDescriptor for the SpaceToDepthLayer
struct SpaceToDepthDescriptor
{
    SpaceToDepthDescriptor()
        : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
    {}

    SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
        : m_BlockSize(blockSize)
        , m_DataLayout(dataLayout)
    {}

    bool operator ==(const SpaceToDepthDescriptor& rhs) const
    {
        return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
    }

    /// Scalar specifying the input block size. It must be >= 1.
    unsigned int m_BlockSize;

    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};

/// A DepthToSpaceDescriptor for the DepthToSpaceLayer
using DepthToSpaceDescriptor = SpaceToDepthDescriptor;

/// An LstmDescriptor for the LstmLayer.
struct LstmDescriptor
{
    LstmDescriptor()
        : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
        , m_ClippingThresCell(0.0)
        , m_ClippingThresProj(0.0)
        , m_CifgEnabled(true)
        , m_PeepholeEnabled(false)
        , m_ProjectionEnabled(false)
        , m_LayerNormEnabled(false)
    {}

    bool operator ==(const LstmDescriptor& rhs) const
    {
        return m_ActivationFunc == rhs.m_ActivationFunc &&
               m_ClippingThresCell == rhs.m_ClippingThresCell &&
               m_ClippingThresProj == rhs.m_ClippingThresProj &&
               m_CifgEnabled == rhs.m_CifgEnabled &&
               m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
               m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
               m_LayerNormEnabled == rhs.m_LayerNormEnabled;
    }

    /// @brief The activation function to use.
    /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
    uint32_t m_ActivationFunc;
    /// Clipping threshold value for the cell state.
    float m_ClippingThresCell;
    /// Clipping threshold value for the projection.
    float m_ClippingThresProj;
    /// Enable/disable cifg (coupled input & forget gate).
    bool m_CifgEnabled;
    /// Enable/disable peephole.
    bool m_PeepholeEnabled;
    /// Enable/disable the projection layer.
    bool m_ProjectionEnabled;
    /// Enable/disable layer normalization.
    bool m_LayerNormEnabled;
};

/// A MeanDescriptor for the MeanLayer.
struct MeanDescriptor
{
    MeanDescriptor()
        : m_Axis()
        , m_KeepDims(false)
    {}

    MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
        : m_Axis(axis)
        , m_KeepDims(keepDims)
    {}

    bool operator ==(const MeanDescriptor& rhs) const
    {
        return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
    }

    /// Values for the dimensions to reduce.
    std::vector<unsigned int> m_Axis;
    /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
    bool m_KeepDims;
};

/// A PadDescriptor for the PadLayer.
struct PadDescriptor
{
    PadDescriptor() : m_PadValue(0)
    {}

    PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
        : m_PadList(padList)
        , m_PadValue(padValue)
    {}

    bool operator ==(const PadDescriptor& rhs) const
    {
        return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue;
    }

    /// @brief Specifies the padding for input dimension.
    /// First is the number of values to add before the tensor in the dimension.
    /// Second is the number of values to add after the tensor in the dimension.
    /// The number of pairs should match the number of dimensions in the input tensor.
    std::vector<std::pair<unsigned int, unsigned int>> m_PadList;

    /// Optional value to use for padding, defaults to 0.
    float m_PadValue;
};
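
/// Example (illustrative sketch; the values and include path are assumptions): padding a 2-D tensor with one row
/// above/below and two columns left/right, filled with zeros.
/// @code
/// #include <armnn/Descriptors.hpp>
///
/// armnn::PadDescriptor padDesc({ {1, 1},    // values added before/after dimension 0
///                                {2, 2} },  // values added before/after dimension 1
///                              0.0f);       // padding value
/// @endcode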

/// A SliceDescriptor for the SliceLayer.
struct SliceDescriptor
{
    SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
        : m_Begin(begin)
        , m_Size(size)
    {}

    SliceDescriptor() : SliceDescriptor({}, {})
    {}

    bool operator ==(const SliceDescriptor& rhs) const
    {
        return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
    }

    /// Beginning indices of the slice in each dimension.
    std::vector<unsigned int> m_Begin;

    /// Size of the slice in each dimension.
    std::vector<unsigned int> m_Size;
};

/// A StackDescriptor for the StackLayer.
struct StackDescriptor
{
    StackDescriptor()
        : m_Axis(0)
        , m_NumInputs(0)
        , m_InputShape()
    {}

    StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
        : m_Axis(axis)
        , m_NumInputs(numInputs)
        , m_InputShape(inputShape)
    {}

    bool operator ==(const StackDescriptor& rhs) const
    {
        return m_Axis == rhs.m_Axis &&
               m_NumInputs == rhs.m_NumInputs &&
               m_InputShape == rhs.m_InputShape;
    }

    /// 0-based axis along which to stack the input tensors.
    uint32_t m_Axis;
    /// Number of input tensors.
    uint32_t m_NumInputs;
    /// Required shape of all input tensors.
    TensorShape m_InputShape;
};

/// A StandInDescriptor for the StandIn layer
struct StandInDescriptor
{
    StandInDescriptor() {};

    StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
        : m_NumInputs(numInputs)
        , m_NumOutputs(numOutputs)
    {}

    bool operator ==(const StandInDescriptor& rhs) const
    {
        return m_NumInputs == rhs.m_NumInputs &&
               m_NumOutputs == rhs.m_NumOutputs;
    }

    /// Number of input tensors
    uint32_t m_NumInputs = 0;
    /// Number of output tensors
    uint32_t m_NumOutputs = 0;
};

/// A StridedSliceDescriptor for the StridedSliceLayer.
struct StridedSliceDescriptor
{
    StridedSliceDescriptor(const std::vector<int>& begin,
                           const std::vector<int>& end,
                           const std::vector<int>& stride)
        : m_Begin(begin)
        , m_End(end)
        , m_Stride(stride)
        , m_BeginMask(0)
        , m_EndMask(0)
        , m_ShrinkAxisMask(0)
        , m_EllipsisMask(0)
        , m_NewAxisMask(0)
        , m_DataLayout(DataLayout::NCHW)
    {}

    StridedSliceDescriptor()
        : StridedSliceDescriptor({}, {}, {})
    {}

    bool operator ==(const StridedSliceDescriptor& rhs) const
    {
        return m_Begin == rhs.m_Begin &&
               m_End == rhs.m_End &&
               m_Stride == rhs.m_Stride &&
               m_BeginMask == rhs.m_BeginMask &&
               m_EndMask == rhs.m_EndMask &&
               m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
               m_EllipsisMask == rhs.m_EllipsisMask &&
               m_NewAxisMask == rhs.m_NewAxisMask &&
               m_DataLayout == rhs.m_DataLayout;
    }

    int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
    int GetStopForAxis(const TensorShape& inputShape,
                       unsigned int axis,
                       int startForAxis) const;

    /// Begin values for the input that will be sliced.
    std::vector<int> m_Begin;
    /// End values for the input that will be sliced.
    std::vector<int> m_End;
    /// Stride values for the input that will be sliced.
    std::vector<int> m_Stride;

    /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
    /// range is used for the dimension.
    int32_t m_BeginMask;
    /// @brief End mask value. If set, then the end is disregarded and the fullest range
    /// is used for the dimension.
    int32_t m_EndMask;
    /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
    int32_t m_ShrinkAxisMask;
    /// Ellipsis mask value.
    int32_t m_EllipsisMask;
    /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
    /// a new 1 dimension is inserted to this location of the output tensor.
    int32_t m_NewAxisMask;

    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
};
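
/// Example (illustrative sketch; the values, the include path, and the TensorFlow-style exclusive end values are
/// assumptions): intended to take the first two channels of a [1, 4, 8, 8] NCHW tensor with unit strides.
/// @code
/// #include <armnn/Descriptors.hpp>
///
/// armnn::StridedSliceDescriptor sliceDesc({0, 0, 0, 0},   // begin
///                                         {1, 2, 8, 8},   // end
///                                         {1, 1, 1, 1});  // stride
/// sliceDesc.m_DataLayout = armnn::DataLayout::NCHW;
/// @endcode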

/// A PreCompiledDescriptor for the PreCompiledLayer.
struct PreCompiledDescriptor
{
    PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
        : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
    {}

    ~PreCompiledDescriptor() = default;

    unsigned int m_NumInputSlots;
    unsigned int m_NumOutputSlots;
};

/// A QLstmDescriptor for the QLstmLayer.
struct QLstmDescriptor
{
    QLstmDescriptor()
        : m_CellClip(0.0)
        , m_ProjectionClip(0.0)
        , m_CifgEnabled(true)
        , m_PeepholeEnabled(false)
        , m_ProjectionEnabled(false)
        , m_LayerNormEnabled(false)
        , m_InputIntermediateScale(0.0)
        , m_ForgetIntermediateScale(0.0)
        , m_CellIntermediateScale(0.0)
        , m_OutputIntermediateScale(0.0)
        , m_HiddenStateZeroPoint(0)
        , m_HiddenStateScale(0.0)
    {}

    bool operator ==(const QLstmDescriptor& rhs) const
    {
        return m_CellClip == rhs.m_CellClip &&
               m_ProjectionClip == rhs.m_ProjectionClip &&
               m_CifgEnabled == rhs.m_CifgEnabled &&
               m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
               m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
               m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
               m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
               m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
               m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
               m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
               m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
               m_HiddenStateScale == rhs.m_HiddenStateScale;
    }

    /// Clipping threshold value for the cell state.
    float m_CellClip;
    /// Clipping threshold value for the projection.
    float m_ProjectionClip;
    /// Enable/disable CIFG (coupled input & forget gate).
    bool m_CifgEnabled;
    /// Enable/disable peephole.
    bool m_PeepholeEnabled;
    /// Enable/disable the projection layer.
    bool m_ProjectionEnabled;
    /// Enable/disable layer normalization.
    bool m_LayerNormEnabled;
    /// Input intermediate quantization scale.
    float m_InputIntermediateScale;
    /// Forget intermediate quantization scale.
    float m_ForgetIntermediateScale;
    /// Cell intermediate quantization scale.
    float m_CellIntermediateScale;
    /// Output intermediate quantization scale.
    float m_OutputIntermediateScale;
    /// Hidden State zero point.
    int32_t m_HiddenStateZeroPoint;
    /// Hidden State quantization scale.
    float m_HiddenStateScale;
};

/// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
struct TransposeConvolution2dDescriptor
{
    TransposeConvolution2dDescriptor() :
        m_PadLeft(0),
        m_PadRight(0),
        m_PadTop(0),
        m_PadBottom(0),
        m_StrideX(0),
        m_StrideY(0),
        m_BiasEnabled(false),
        m_DataLayout(DataLayout::NCHW),
        m_OutputShapeEnabled(false)
    {}

    bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
    {
        return m_PadLeft == rhs.m_PadLeft &&
               m_PadRight == rhs.m_PadRight &&
               m_PadTop == rhs.m_PadTop &&
               m_PadBottom == rhs.m_PadBottom &&
               m_StrideX == rhs.m_StrideX &&
               m_StrideY == rhs.m_StrideY &&
               m_BiasEnabled == rhs.m_BiasEnabled &&
               m_DataLayout == rhs.m_DataLayout &&
               m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
               m_OutputShape == rhs.m_OutputShape;
    }

    /// Padding left value in the width dimension.
    uint32_t m_PadLeft;
    /// Padding right value in the width dimension.
    uint32_t m_PadRight;
    /// Padding top value in the height dimension.
    uint32_t m_PadTop;
    /// Padding bottom value in the height dimension.
    uint32_t m_PadBottom;
    /// Stride value when proceeding through input for the width dimension.
    uint32_t m_StrideX;
    /// Stride value when proceeding through input for the height dimension.
    uint32_t m_StrideY;
    /// Enable/disable bias.
    bool m_BiasEnabled;
    /// The data layout to be used (NCHW, NHWC).
    DataLayout m_DataLayout;
    /// Set to true if the output shape has been specified in m_OutputShape.
    bool m_OutputShapeEnabled;
    /// Output shape if it has been specified.
    std::vector<unsigned int> m_OutputShape;
};

/// A TransposeDescriptor for the TransposeLayer.
struct TransposeDescriptor
{
    TransposeDescriptor()
        : m_DimMappings{}
    {}

    TransposeDescriptor(const PermutationVector& dimMappings)
        : m_DimMappings(dimMappings)
    {}

    bool operator ==(const TransposeDescriptor &rhs) const
    {
        return m_DimMappings.IsEqual(rhs.m_DimMappings);
    }

    /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
    /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
    PermutationVector m_DimMappings;
};

} // namespace armnn