ArmNN
 20.11
Descriptors.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 
13 #include "Tensor.hpp"
14 #include "Types.hpp"
15 
16 namespace armnn
17 {
18 
19 /// An ActivationDescriptor for the ActivationLayer.
21 {
24  , m_A(0)
25  , m_B(0)
26  {}
27 
29  float a = 0,
30  float b = 0)
31  : m_Function(activation)
32  , m_A(a)
33  , m_B(b)
34  {}
35 
36  bool operator ==(const ActivationDescriptor &rhs) const
37  {
38  return m_Function == rhs.m_Function && m_A == rhs.m_B && m_B == rhs.m_B;
39  }
40 
41  /// @brief The activation function to use
42  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
44  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
45  float m_A;
46  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
47  float m_B;
48 };
49 
50 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
52 {
55  , m_Axis(-1)
56  , m_Output_Type(armnn::DataType::Signed32)
57  {}
58 
59  bool operator ==(const ArgMinMaxDescriptor &rhs) const
60  {
61  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
62  }
63 
64  /// Specify if the function is to find Min or Max.
66  /// Axis to reduce across the input tensor.
67  int m_Axis;
 68  // Tensor data type, and this could be int32 or int64. Default type is int32 (Signed32).
70 };
71 
72 /// A ComparisonDescriptor for the ComparisonLayer
74 {
77  {}
78 
80  : m_Operation(operation)
81  {}
82 
83  bool operator ==(const ComparisonDescriptor &rhs) const
84  {
85  return m_Operation == rhs.m_Operation;
86  }
87 
88  /// Specifies the comparison operation to execute
90 };
91 
92 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
94 {
97  {}
98 
100  : m_Operation(operation)
101  {}
102 
104  {
105  return m_Operation == rhs.m_Operation;
106  }
107 
108  /// Specifies the elementwiseUnary operation to execute
110 };
111 
112 /// A PermuteDescriptor for the PermuteLayer.
114 {
116  : m_DimMappings{}
117  {}
118 
120  : m_DimMappings(dimMappings)
121  {}
122 
123  bool operator ==(const PermuteDescriptor &rhs) const
124  {
125  return m_DimMappings.IsEqual(rhs.m_DimMappings);
126  }
127 
128  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
129  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
131 };
132 
133 /// A SoftmaxDescriptor for the SoftmaxLayer.
135 {
137  : m_Beta(1.0f)
138  , m_Axis(-1)
139  {}
140 
141  bool operator ==(const SoftmaxDescriptor& rhs) const
142  {
143  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
144  }
145 
146  /// Exponentiation value.
147  float m_Beta;
148  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
149  int m_Axis;
150 };
151 
152 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer
154 
155 /// @brief An OriginsDescriptor for the ConcatLayer.
156 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
157 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
159 {
161  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
162  OriginsDescriptor(const OriginsDescriptor& other);
164 
166 
167  OriginsDescriptor& operator=(OriginsDescriptor rhs);
168 
169  bool operator ==(const OriginsDescriptor& rhs) const;
170 
 171  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
172  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
173  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
174  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
175  /// Get the number of views.
176  uint32_t GetNumViews() const;
177  /// Get the number of dimensions.
178  uint32_t GetNumDimensions() const;
179  /// Return the view origin at the int value idx.
180  const uint32_t* GetViewOrigin(uint32_t idx) const;
181  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
182  /// The number of views must match number of elements in the new ordering array.
183  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
 184  /// Swap the OriginsDescriptor values first and second.
185  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
186  /// Set the concatenation axis value.
187  void SetConcatAxis(unsigned int concatAxis);
188  /// Get the concatenation axis value.
189  unsigned int GetConcatAxis() const;
190 
191 private:
192  unsigned int m_ConcatAxis;
193  uint32_t m_NumViews;
194  uint32_t m_NumDimensions;
195  uint32_t** m_ViewOrigins;
196 };
197 
198 /// @brief A ViewsDescriptor for the SplitterLayer.
199 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
200 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
202 {
203  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
204  ViewsDescriptor(const ViewsDescriptor& other);
205  ViewsDescriptor();
207 
208  ~ViewsDescriptor();
209 
210  ViewsDescriptor& operator=(ViewsDescriptor rhs);
211 
212  bool operator ==(const ViewsDescriptor& rhs) const;
213 
 214  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
215  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
216  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
217  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
218  /// @brief Set the size of the views. The arguments are: view, dimension, value.
219  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
220  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
221  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
222 
223  /// Get the number of views.
224  uint32_t GetNumViews() const;
225  /// Get the number of dimensions.
226  uint32_t GetNumDimensions() const;
227  /// Get the view origin at the int value idx.
228  const uint32_t* GetViewOrigin(uint32_t idx) const;
229  /// Get the view sizes at the int value idx.
230  const uint32_t* GetViewSizes(uint32_t idx) const;
231  /// Get the View Origins
232  const OriginsDescriptor& GetOrigins() const;
233 
234  /// Swap the ViewsDescriptor value first and second.
235  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
236 private:
237  OriginsDescriptor m_Origins;
238  uint32_t** m_ViewSizes;
239 };
240 
241 template <typename TensorShapeIt>
242 ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
244  TensorShapeIt last,
245  unsigned int concatenationDimension)
246 {
247  return CreateDescriptorForConcatenation(first, last, concatenationDimension);
248 }
249 
250 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
251 /// concatenation of a number of input tensors.
252 template <typename TensorShapeIt>
254  TensorShapeIt last,
255  unsigned int concatenationDimension)
256 {
257  auto numInputs = std::distance(first, last);
258 
259  if (numInputs < 2)
260  {
261  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
262  }
263 
264  const auto& firstInputShape = *first;
265 
266  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
267  for (auto it = first + 1; it != last; ++it)
268  {
269  if (it->GetNumDimensions() != numDimensions)
270  {
271  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
272  }
273  }
274 
275  if (concatenationDimension >= numDimensions)
276  {
277  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
278  }
279 
280  for (auto it = first; it != last; ++it)
281  {
282  for (unsigned int d = 0; d < numDimensions; ++d)
283  {
284  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
285  if (!dimSizeOk)
286  {
287  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
288  " except the concatenation dimension");
289  }
290  }
291  }
292 
293  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
294  viewsDescriptor.SetConcatAxis(concatenationDimension);
295 
296  uint32_t viewIndex = 0u;
297  uint32_t coordAlongConcatDim = 0u;
298  for (auto it = first; it != last; ++it)
299  {
300  const auto& inputShape = *it;
301 
302  for (unsigned int i = 0; i < concatenationDimension; ++i)
303  {
304  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
305  }
306 
307  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
308  unsigned int dimSize = inputShape[concatenationDimension];
309  coordAlongConcatDim += dimSize;
310 
311 
312  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
313  {
314  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
315  }
316 
317  ++viewIndex;
318  }
319 
320  return viewsDescriptor;
321 }
322 
323 /// A Pooling2dDescriptor for the Pooling2dLayer.
325 {
327  : m_PoolType(PoolingAlgorithm::Max)
328  , m_PadLeft(0)
329  , m_PadRight(0)
330  , m_PadTop(0)
331  , m_PadBottom(0)
332  , m_PoolWidth(0)
333  , m_PoolHeight(0)
334  , m_StrideX(0)
335  , m_StrideY(0)
336  , m_OutputShapeRounding(OutputShapeRounding::Floor)
337  , m_PaddingMethod(PaddingMethod::Exclude)
338  , m_DataLayout(DataLayout::NCHW)
339  {}
340 
341  bool operator ==(const Pooling2dDescriptor& rhs) const
342  {
343  return m_PoolType == rhs.m_PoolType &&
344  m_PadLeft == rhs.m_PadLeft &&
345  m_PadRight == rhs.m_PadRight &&
346  m_PadTop == rhs.m_PadTop &&
347  m_PadBottom == rhs.m_PadBottom &&
348  m_PoolWidth == rhs.m_PoolWidth &&
349  m_PoolHeight == rhs.m_PoolHeight &&
350  m_StrideX == rhs.m_StrideX &&
351  m_StrideY == rhs.m_StrideY &&
352  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
353  m_PaddingMethod == rhs.m_PaddingMethod &&
354  m_DataLayout == rhs.m_DataLayout;
355  }
356 
 357  /// The pooling algorithm to use (Max, Average, L2).
359  /// Padding left value in the width dimension.
360  uint32_t m_PadLeft;
361  /// Padding right value in the width dimension.
362  uint32_t m_PadRight;
363  /// Padding top value in the height dimension.
364  uint32_t m_PadTop;
365  /// Padding bottom value in the height dimension.
366  uint32_t m_PadBottom;
367  /// Pooling width value.
368  uint32_t m_PoolWidth;
369  /// Pooling height value.
370  uint32_t m_PoolHeight;
371  /// Stride value when proceeding through input for the width dimension.
372  uint32_t m_StrideX;
373  /// Stride value when proceeding through input for the height dimension.
374  uint32_t m_StrideY;
375  /// The rounding method for the output shape. (Floor, Ceiling).
377  /// The padding method to be used. (Exclude, IgnoreValue).
379  /// The data layout to be used (NCHW, NHWC).
381 };
382 
383 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
385 {
387  : m_BiasEnabled(false)
388  , m_TransposeWeightMatrix(false)
389  {}
390 
391  bool operator ==(const FullyConnectedDescriptor& rhs) const
392  {
393  return m_BiasEnabled == rhs.m_BiasEnabled && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix;
394  }
395 
396  /// Enable/disable bias.
398  /// Enable/disable transpose weight matrix.
400 };
401 
402 /// A Convolution2dDescriptor for the Convolution2dLayer.
404 {
406  : m_PadLeft(0)
407  , m_PadRight(0)
408  , m_PadTop(0)
409  , m_PadBottom(0)
410  , m_StrideX(0)
411  , m_StrideY(0)
412  , m_DilationX(1)
413  , m_DilationY(1)
414  , m_BiasEnabled(false)
415  , m_DataLayout(DataLayout::NCHW)
416  {}
417 
418  bool operator ==(const Convolution2dDescriptor& rhs) const
419  {
420  return m_PadLeft == rhs.m_PadLeft &&
421  m_PadRight == rhs.m_PadRight &&
422  m_PadTop == rhs.m_PadTop &&
423  m_PadBottom == rhs.m_PadBottom &&
424  m_StrideX == rhs.m_StrideX &&
425  m_StrideY == rhs.m_StrideY &&
426  m_DilationX == rhs.m_DilationX &&
427  m_DilationY == rhs.m_DilationY &&
428  m_BiasEnabled == rhs.m_BiasEnabled &&
429  m_DataLayout == rhs.m_DataLayout;
430  }
431 
432  /// Padding left value in the width dimension.
433  uint32_t m_PadLeft;
434  /// Padding right value in the width dimension.
435  uint32_t m_PadRight;
436  /// Padding top value in the height dimension.
437  uint32_t m_PadTop;
438  /// Padding bottom value in the height dimension.
439  uint32_t m_PadBottom;
440  /// Stride value when proceeding through input for the width dimension.
441  uint32_t m_StrideX;
442  /// Stride value when proceeding through input for the height dimension.
443  uint32_t m_StrideY;
444  /// Dilation along x axis
445  uint32_t m_DilationX;
446  /// Dilation along y axis
447  uint32_t m_DilationY;
448  /// Enable/disable bias.
450  /// The data layout to be used (NCHW, NHWC).
452 };
453 
454 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
456 {
458  : m_PadLeft(0)
459  , m_PadRight(0)
460  , m_PadTop(0)
461  , m_PadBottom(0)
462  , m_StrideX(0)
463  , m_StrideY(0)
464  , m_DilationX(1)
465  , m_DilationY(1)
466  , m_BiasEnabled(false)
467  , m_DataLayout(DataLayout::NCHW)
468  {}
469 
471  {
472  return m_PadLeft == rhs.m_PadLeft &&
473  m_PadRight == rhs.m_PadRight &&
474  m_PadTop == rhs.m_PadTop &&
475  m_PadBottom == rhs.m_PadBottom &&
476  m_StrideX == rhs.m_StrideX &&
477  m_StrideY == rhs.m_StrideY &&
478  m_DilationX == rhs.m_DilationX &&
479  m_DilationY == rhs.m_DilationY &&
480  m_BiasEnabled == rhs.m_BiasEnabled &&
481  m_DataLayout == rhs.m_DataLayout;
482  }
483 
484  /// Padding left value in the width dimension.
485  uint32_t m_PadLeft;
486  /// Padding right value in the width dimension.
487  uint32_t m_PadRight;
488  /// Padding top value in the height dimension.
489  uint32_t m_PadTop;
490  /// Padding bottom value in the height dimension.
491  uint32_t m_PadBottom;
492  /// Stride value when proceeding through input for the width dimension.
493  uint32_t m_StrideX;
494  /// Stride value when proceeding through input for the height dimension.
495  uint32_t m_StrideY;
496  /// Dilation factor value for width dimension.
497  uint32_t m_DilationX;
498  /// Dilation factor value for height dimension.
499  uint32_t m_DilationY;
500  /// Enable/disable bias.
502  /// The data layout to be used (NCHW, NHWC).
504 };
505 
507 {
509  : m_MaxDetections(0)
510  , m_MaxClassesPerDetection(1)
511  , m_DetectionsPerClass(1)
512  , m_NmsScoreThreshold(0)
513  , m_NmsIouThreshold(0)
514  , m_NumClasses(0)
515  , m_UseRegularNms(false)
516  , m_ScaleX(0)
517  , m_ScaleY(0)
518  , m_ScaleW(0)
519  , m_ScaleH(0)
520  {}
521 
523  {
524  return m_MaxDetections == rhs.m_MaxDetections &&
525  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
526  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
527  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
528  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
529  m_NumClasses == rhs.m_NumClasses &&
530  m_UseRegularNms == rhs.m_UseRegularNms &&
531  m_ScaleX == rhs.m_ScaleX &&
532  m_ScaleY == rhs.m_ScaleY &&
533  m_ScaleW == rhs.m_ScaleW &&
534  m_ScaleH == rhs.m_ScaleH;
535  }
536 
537  /// Maximum numbers of detections.
538  uint32_t m_MaxDetections;
539  /// Maximum numbers of classes per detection, used in Fast NMS.
541  /// Detections per classes, used in Regular NMS.
543  /// NMS score threshold.
545  /// Intersection over union threshold.
547  /// Number of classes.
548  uint32_t m_NumClasses;
549  /// Use Regular NMS.
551  /// Center size encoding scale x.
552  float m_ScaleX;
553  /// Center size encoding scale y.
554  float m_ScaleY;
555  /// Center size encoding scale weight.
556  float m_ScaleW;
557  /// Center size encoding scale height.
558  float m_ScaleH;
559 };
560 
561 /// A NormalizationDescriptor for the NormalizationLayer.
563 {
565  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
566  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
567  , m_NormSize(0)
568  , m_Alpha(0.f)
569  , m_Beta(0.f)
570  , m_K(0.f)
571  , m_DataLayout(DataLayout::NCHW)
572  {}
573 
574  bool operator ==(const NormalizationDescriptor& rhs) const
575  {
576  return m_NormChannelType == rhs.m_NormChannelType &&
577  m_NormMethodType == rhs.m_NormMethodType &&
578  m_NormSize == rhs.m_NormSize &&
579  m_Alpha == rhs.m_Alpha &&
580  m_Beta == rhs.m_Beta &&
581  m_K == rhs.m_K &&
582  m_DataLayout == rhs.m_DataLayout;
583  }
584 
585  /// Normalization channel algorithm to use (Across, Within).
587  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
589  /// Depth radius value.
590  uint32_t m_NormSize;
591  /// Alpha value for the normalization equation.
592  float m_Alpha;
593  /// Beta value for the normalization equation.
594  float m_Beta;
595  /// Kappa value used for the across channel normalization equation.
596  float m_K;
597  /// The data layout to be used (NCHW, NHWC).
599 };
600 
601 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
603 {
605  : m_Eps(1e-12f)
606  , m_DataLayout(DataLayout::NCHW)
607  {}
608 
609  bool operator ==(const L2NormalizationDescriptor& rhs) const
610  {
611  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
612  }
613 
614  /// Used to avoid dividing by zero.
615  float m_Eps;
616  /// The data layout to be used (NCHW, NHWC).
618 };
619 
620 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
622 {
624  : m_Eps(0.0001f)
625  , m_DataLayout(DataLayout::NCHW)
626  {}
627 
629  {
630  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
631  }
632 
633  /// Value to add to the variance. Used to avoid dividing by zero.
634  float m_Eps;
635  /// The data layout to be used (NCHW, NHWC).
637 };
638 
639 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
641 {
643  : m_Gamma(1.0f)
644  , m_Beta(0.0f)
645  , m_Eps(1e-12f)
646  , m_DataLayout(DataLayout::NCHW)
647  {}
648 
650  {
651  return m_Gamma == rhs.m_Gamma &&
652  m_Beta == rhs.m_Beta &&
653  m_Eps == rhs.m_Eps &&
654  m_DataLayout == rhs.m_DataLayout;
655  }
656 
657  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
658  float m_Gamma;
 659  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
660  float m_Beta;
661  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
662  float m_Eps;
663  /// The data layout to be used (NCHW, NHWC).
665 };
666 
667 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
669 {
671  : m_BlockShape({1, 1})
672  , m_Crops({{0, 0}, {0, 0}})
673  , m_DataLayout(DataLayout::NCHW)
674  {}
675 
676  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
677  std::vector<std::pair<unsigned int, unsigned int>> crops)
678  : m_BlockShape(blockShape)
679  , m_Crops(crops)
680  , m_DataLayout(DataLayout::NCHW)
681  {}
682 
683  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
684  {
685  return m_BlockShape == rhs.m_BlockShape &&
686  m_Crops == rhs.m_Crops &&
687  m_DataLayout == rhs.m_DataLayout;
688  }
689 
690  /// Block shape values.
691  std::vector<unsigned int> m_BlockShape;
692  /// The values to crop from the input dimension.
693  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
694  /// The data layout to be used (NCHW, NHWC).
696 };
697 
698 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
700 {
702  : m_Min(-6.0f)
703  , m_Max(6.0f)
704  {}
705 
707  {
708  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
709  }
710 
711  /// Minimum value.
712  float m_Min;
713  /// Maximum value.
714  float m_Max;
715 };
716 
717 /// A FillDescriptor for the FillLayer
719 {
721  : m_Value(0)
722  {}
723 
724  FillDescriptor(const float& value)
725  : m_Value(value)
726  {}
727 
728  bool operator ==(const FillDescriptor& rhs) const
729  {
730  return m_Value == rhs.m_Value;
731  }
732 
733  float m_Value;
734 };
735 
736 /// A GatherDescriptor for the GatherLayer.
738 {
740  : m_Axis(0)
741  {}
742 
743  GatherDescriptor(int32_t axis)
744  : m_Axis(axis)
745  {}
746 
747  bool operator ==(const GatherDescriptor& rhs) const
748  {
749  return m_Axis == rhs.m_Axis;
750  }
751 
752  /// The axis in params to gather indices from
753  int32_t m_Axis;
754 };
755 
756 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
758 {
760  : m_TargetWidth(0)
761  , m_TargetHeight(0)
762  , m_DataLayout(DataLayout::NCHW)
763  , m_AlignCorners(false)
764  , m_HalfPixelCenters(false)
765  {}
766 
767  /// Target width value.
768  uint32_t m_TargetWidth;
769  /// Target height value.
770  uint32_t m_TargetHeight;
771  /// The data layout to be used (NCHW, NHWC).
773  /// Aligned corners
775  /// Half Pixel Centers
777 };
778 
779 /// A ResizeDescriptor for the ResizeLayer.
781 {
783  : m_TargetWidth(0)
784  , m_TargetHeight(0)
785  , m_Method(ResizeMethod::NearestNeighbor)
786  , m_DataLayout(DataLayout::NCHW)
787  , m_AlignCorners(false)
788  , m_HalfPixelCenters(false)
789  {}
790 
791  bool operator ==(const ResizeDescriptor& rhs) const
792  {
793  return m_TargetWidth == rhs.m_TargetWidth &&
794  m_TargetHeight == rhs.m_TargetHeight &&
795  m_Method == rhs.m_Method &&
796  m_DataLayout == rhs.m_DataLayout &&
797  m_AlignCorners == rhs.m_AlignCorners &&
798  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
799  }
800 
801  /// Target width value.
802  uint32_t m_TargetWidth;
803  /// Target height value.
804  uint32_t m_TargetHeight;
805  /// The Interpolation method to use
806  /// (Bilinear, NearestNeighbor).
808  /// The data layout to be used (NCHW, NHWC).
810  /// Aligned corners
812  /// Half Pixel Centers
814 };
815 
816 
817 /// A ReshapeDescriptor for the ReshapeLayer.
819 {
821  : m_TargetShape()
822  {}
823 
825  : m_TargetShape(shape)
826  {}
827 
828  bool operator ==(const ReshapeDescriptor& rhs) const
829  {
830  return m_TargetShape == rhs.m_TargetShape;
831  }
832 
833  /// Target shape value.
835 };
836 
837 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
839 {
841  : m_BlockShape({1, 1})
842  , m_PadList({{0, 0}, {0, 0}})
843  , m_DataLayout(DataLayout::NCHW)
844  {}
845 
846  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
847  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
848  : m_BlockShape(blockShape)
849  , m_PadList(padList)
850  , m_DataLayout(DataLayout::NCHW)
851  {}
852 
853  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
854  {
855  return m_BlockShape == rhs.m_BlockShape &&
856  m_PadList == rhs.m_PadList &&
857  m_DataLayout == rhs.m_DataLayout;
858  }
859 
860  /// Block shape value.
861  std::vector<unsigned int> m_BlockShape;
862  /// @brief Specifies the padding values for the input dimension:
863  /// heightPad{top, bottom} widthPad{left, right}.
864  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
865  /// The data layout to be used (NCHW, NHWC).
867 };
868 
869 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
871 {
874  {}
875 
876  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
877  : m_BlockSize(blockSize)
878  , m_DataLayout(dataLayout)
879  {}
880 
881  bool operator ==(const SpaceToDepthDescriptor& rhs) const
882  {
883  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
884  }
885 
886  /// Scalar specifying the input block size. It must be >= 1
887  unsigned int m_BlockSize;
888 
889  /// The data layout to be used (NCHW, NHWC).
891 };
892 
893 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer
895 
896 /// An LstmDescriptor for the LstmLayer.
898 {
900  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
901  , m_ClippingThresCell(0.0)
902  , m_ClippingThresProj(0.0)
903  , m_CifgEnabled(true)
904  , m_PeepholeEnabled(false)
905  , m_ProjectionEnabled(false)
906  , m_LayerNormEnabled(false)
907  {}
908 
909  bool operator ==(const LstmDescriptor& rhs) const
910  {
911  return m_ActivationFunc == rhs.m_ActivationFunc &&
912  m_ClippingThresCell == rhs.m_ClippingThresCell &&
913  m_ClippingThresProj == rhs.m_ClippingThresProj &&
914  m_CifgEnabled == rhs.m_CifgEnabled &&
915  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
916  m_LayerNormEnabled == rhs.m_LayerNormEnabled;
917  }
918 
919  /// @brief The activation function to use.
920  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
922  /// Clipping threshold value for the cell state.
924  /// Clipping threshold value for the projection.
926  /// Enable/disable cifg (coupled input & forget gate).
928  /// Enable/disable peephole.
930  /// Enable/disable the projection layer.
932  /// Enable/disable layer normalization
934 };
935 
936 /// A MeanDescriptor for the MeanLayer.
938 {
940  : m_Axis()
941  , m_KeepDims(false)
942  {}
943 
944  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
945  : m_Axis(axis)
946  , m_KeepDims(keepDims)
947  {}
948 
949  bool operator ==(const MeanDescriptor& rhs) const
950  {
951  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
952  }
953 
954  /// Values for the dimensions to reduce.
955  std::vector<unsigned int> m_Axis;
956  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
958 };
959 
960 /// A PadDescriptor for the PadLayer.
962 {
963  PadDescriptor() : m_PadValue(0)
964  {}
965 
966  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
967  : m_PadList(padList)
968  , m_PadValue(padValue)
969  {}
970 
971  bool operator ==(const PadDescriptor& rhs) const
972  {
973  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue;
974  }
975 
976  /// @brief Specifies the padding for input dimension.
977  /// First is the number of values to add before the tensor in the dimension.
978  /// Second is the number of values to add after the tensor in the dimension.
979  /// The number of pairs should match the number of dimensions in the input tensor.
980  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
981 
982  /// Optional value to use for padding, defaults to 0
983  float m_PadValue;
984 };
985 
986 /// A SliceDescriptor for the SliceLayer.
988 {
989  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
990  : m_Begin(begin)
991  , m_Size(size)
992  {}
993 
995  {}
996 
997  bool operator ==(const SliceDescriptor& rhs) const
998  {
999  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1000  }
1001 
1002  /// Beginning indices of the slice in each dimension.
1003  std::vector<unsigned int> m_Begin;
1004 
1005  /// Size of the slice in each dimension.
1006  std::vector<unsigned int> m_Size;
1007 };
1008 
1009 /// A StackDescriptor for the StackLayer.
1011 {
1013  : m_Axis(0)
1014  , m_NumInputs(0)
1015  , m_InputShape()
1016  {}
1017 
1018  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1019  : m_Axis(axis)
1020  , m_NumInputs(numInputs)
1021  , m_InputShape(inputShape)
1022  {}
1023 
1024  bool operator ==(const StackDescriptor& rhs) const
1025  {
1026  return m_Axis == rhs.m_Axis &&
1027  m_NumInputs == rhs.m_NumInputs &&
1028  m_InputShape == rhs.m_InputShape;
1029  }
1030 
1031  /// 0-based axis along which to stack the input tensors.
1032  uint32_t m_Axis;
1033  /// Number of input tensors.
1034  uint32_t m_NumInputs;
1035  /// Required shape of all input tensors.
1037 };
1038 
1039 /// A StandInDescriptor for the StandIn layer
1041 {
1043 
1044  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1045  : m_NumInputs(numInputs)
1046  , m_NumOutputs(numOutputs)
1047  {}
1048 
1049  bool operator ==(const StandInDescriptor& rhs) const
1050  {
1051  return m_NumInputs == rhs.m_NumInputs &&
1052  m_NumOutputs == rhs.m_NumOutputs;
1053  }
1054 
1055  /// Number of input tensors
1056  uint32_t m_NumInputs = 0;
1057  /// Number of output tensors
1058  uint32_t m_NumOutputs = 0;
1059 };
1060 
1061 /// A StridedSliceDescriptor for the StridedSliceLayer.
1063 {
1064  StridedSliceDescriptor(const std::vector<int>& begin,
1065  const std::vector<int>& end,
1066  const std::vector<int>& stride)
1067  : m_Begin(begin)
1068  , m_End(end)
1069  , m_Stride(stride)
1070  , m_BeginMask(0)
1071  , m_EndMask(0)
1072  , m_ShrinkAxisMask(0)
1073  , m_EllipsisMask(0)
1074  , m_NewAxisMask(0)
1075  , m_DataLayout(DataLayout::NCHW)
1076  {}
1077 
1079  : StridedSliceDescriptor({}, {}, {})
1080  {}
1081 
1082  bool operator ==(const StridedSliceDescriptor& rhs) const
1083  {
1084  return m_Begin == rhs.m_Begin &&
1085  m_End == rhs.m_End &&
1086  m_Stride == rhs.m_Stride &&
1087  m_BeginMask == rhs.m_BeginMask &&
1088  m_EndMask == rhs.m_EndMask &&
1089  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1090  m_EllipsisMask == rhs.m_EllipsisMask &&
1091  m_NewAxisMask == rhs.m_NewAxisMask &&
1092  m_DataLayout == rhs.m_DataLayout;
1093  }
1094 
1095  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1096  int GetStopForAxis(const TensorShape& inputShape,
1097  unsigned int axis,
1098  int startForAxis) const;
1099 
1100  /// Begin values for the input that will be sliced.
1101  std::vector<int> m_Begin;
1102  /// End values for the input that will be sliced.
1103  std::vector<int> m_End;
1104  /// Stride values for the input that will be sliced.
1105  std::vector<int> m_Stride;
1106 
1107  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1108  /// range is used for the dimension.
1109  int32_t m_BeginMask;
1110  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1111  /// is used for the dimension.
1112  int32_t m_EndMask;
1113  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1115  /// Ellipsis mask value.
1117  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1118  /// a new 1 dimension is inserted to this location of the output tensor.
1119  int32_t m_NewAxisMask;
1120 
1121  /// The data layout to be used (NCHW, NHWC).
1123 };
1124 
1125 /// A PreCompiledDescriptor for the PreCompiledLayer.
1127 {
1128  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1129  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1130  {}
1131 
1132  ~PreCompiledDescriptor() = default;
1133 
1134  unsigned int m_NumInputSlots;
1135  unsigned int m_NumOutputSlots;
1136 };
1137 
1138 /// A QLstmDescriptor for the QLstmLayer.
1140 {
1142  : m_CellClip(0.0)
1143  , m_ProjectionClip(0.0)
1144  , m_CifgEnabled(true)
1145  , m_PeepholeEnabled(false)
1146  , m_ProjectionEnabled(false)
1147  , m_LayerNormEnabled(false)
1148  , m_InputIntermediateScale(0.0)
1149  , m_ForgetIntermediateScale(0.0)
1150  , m_CellIntermediateScale(0.0)
1151  , m_OutputIntermediateScale(0.0)
1152  , m_HiddenStateZeroPoint(0)
1153  , m_HiddenStateScale(0.0)
1154  {}
1155 
1156  bool operator ==(const QLstmDescriptor& rhs) const
1157  {
1158  return m_CellClip == rhs.m_CellClip &&
1159  m_ProjectionClip == rhs.m_ProjectionClip &&
1160  m_CifgEnabled == rhs.m_CifgEnabled &&
1161  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1162  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1163  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1164  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1165  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1166  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1167  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1168  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1169  m_HiddenStateScale == rhs.m_HiddenStateScale;
1170  }
1171 
1172  /// Clipping threshold value for the cell state
1173  float m_CellClip;
1174  /// Clipping threshold value for the projection
1176  /// Enable/disable CIFG (coupled input & forget gate).
1178  /// Enable/disable peephole
1180  /// Enable/disable the projection layer
1182  /// Enable/disable layer normalization
1184  /// Input intermediate quantization scale
1186  /// Forget intermediate quantization scale
1188  /// Cell intermediate quantization scale
1190  /// Output intermediate quantization scale
1192  /// Hidden State zero point
1194  /// Hidden State quantization scale
1196 };
1197 
1198 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1200 {
1202  m_PadLeft(0),
1203  m_PadRight(0),
1204  m_PadTop(0),
1205  m_PadBottom(0),
1206  m_StrideX(0),
1207  m_StrideY(0),
1208  m_BiasEnabled(false),
1209  m_DataLayout(DataLayout::NCHW),
1210  m_OutputShapeEnabled(false)
1211  {}
1212 
1214  {
1215  return m_PadLeft == rhs.m_PadLeft &&
1216  m_PadRight == rhs.m_PadRight &&
1217  m_PadTop == rhs.m_PadTop &&
1218  m_PadBottom == rhs.m_PadBottom &&
1219  m_StrideX == rhs.m_StrideX &&
1220  m_StrideY == rhs.m_StrideY &&
1221  m_BiasEnabled == rhs.m_BiasEnabled &&
1222  m_DataLayout == rhs.m_DataLayout &&
1223  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1224  m_OutputShape == rhs.m_OutputShape;
1225  }
1226 
1227  /// Padding left value in the width dimension.
1228  uint32_t m_PadLeft;
1229  /// Padding right value in the width dimension.
1230  uint32_t m_PadRight;
1231  /// Padding top value in the height dimension.
1232  uint32_t m_PadTop;
1233  /// Padding bottom value in the height dimension.
1234  uint32_t m_PadBottom;
1235  /// Stride value when proceeding through input for the width dimension.
1236  uint32_t m_StrideX;
1237  /// Stride value when proceeding through input for the height dimension.
1238  uint32_t m_StrideY;
1239  /// Enable/disable bias.
1241  /// The data layout to be used (NCHW, NHWC).
1243  /// Output shape if it has been specified.
1245  std::vector<unsigned int> m_OutputShape;
1246 };
1247 
1248 /// A TransposeDescriptor for the TransposeLayer.
1250 {
1252  : m_DimMappings{}
1253  {}
1254 
1256  : m_DimMappings(dimMappings)
1257  {}
1258 
1259  bool operator ==(const TransposeDescriptor &rhs) const
1260  {
1261  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1262  }
1263 
1264  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1265  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1267 };
1268 
1269 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1271 {
1274  {}
1275 
1277  : m_Operation(operation)
1278  {}
1279 
1280  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1281  {
1282  return m_Operation == rhs.m_Operation;
1283  }
1284 
1285  /// Specifies the logical operation to execute
1287 };
1288 
1289 } // namespace armnn
ElementwiseUnaryDescriptor(UnaryOperation operation)
Definition: Descriptors.hpp:99
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
float m_Eps
Used to avoid dividing by zero.
MeanDescriptor(const std::vector< unsigned int > &axis, bool keepDims)
bool m_HalfPixelCenters
Half Pixel Centers.
bool m_ProjectionEnabled
Enable/disable the projection layer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PreCompiledDescriptor(unsigned int numInputSlots=1u, unsigned int numOutputSlots=1u)
bool m_AlignCorners
Aligned corners.
SliceDescriptor(const std::vector< unsigned int > &begin, const std::vector< unsigned int > &size)
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
A ViewsDescriptor for the SplitterLayer.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
DataLayout
Definition: Types.hpp:50
float m_K
Kappa value used for the across channel normalization equation.
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
float m_ClippingThresProj
Clipping threshold value for the projection.
void swap(OriginsDescriptor &first, OriginsDescriptor &second)
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
float m_PadValue
Optional value to use for padding, defaults to 0.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:73
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
uint32_t m_TargetWidth
Target width value.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout::NCHW false
PermuteDescriptor(const PermutationVector &dimMappings)
uint32_t m_PoolWidth
Pooling width value.
bool m_PeepholeEnabled
Enable/disable peephole.
A Convolution2dDescriptor for the Convolution2dLayer.
float m_Alpha
Alpha value for the normalization equation.
uint32_t m_PadLeft
Padding left value in the width dimension.
float m_HiddenStateScale
Hidden State quantization scale.
bool m_BiasEnabled
Enable/disable bias.
std::vector< unsigned int > m_OutputShape
float m_OutputIntermediateScale
Output intermediate quantization scale.
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_Beta
Exponentiation value.
std::vector< unsigned int > m_Size
Size of the slice in each dimension.
ActivationDescriptor(armnn::ActivationFunction activation, float a=0, float b=0)
Definition: Descriptors.hpp:28
The padding fields don't count and are ignored.
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:65
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
NormalizationAlgorithmChannel
Definition: Types.hpp:141
bool m_OutputShapeEnabled
Output shape if it has been specified.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool operator==(const ActivationDescriptor &rhs) const
Definition: Descriptors.hpp:36
uint32_t m_PadTop
Padding top value in the height dimension.
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
Copyright (c) 2020 ARM Limited.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
PoolingAlgorithm
Definition: Types.hpp:104
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_DilationY
Dilation factor value for height dimension.
StridedSliceDescriptor(const std::vector< int > &begin, const std::vector< int > &end, const std::vector< int > &stride)
LogicalBinaryOperation m_Operation
Specifies the logical operation to execute.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LogicalBinaryOperation
Definition: Types.hpp:88
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
uint32_t m_NumOutputs
Number of output tensors.
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
A ResizeDescriptor for the ResizeLayer.
PaddingMethod
The padding method modifies the output of pooling layers.
Definition: Types.hpp:133
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
TensorShape m_TargetShape
Target shape value.
ComparisonDescriptor(ComparisonOperation operation)
Definition: Descriptors.hpp:79
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
ComparisonOperation
Definition: Types.hpp:78
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
bool m_LayerNormEnabled
Enable/disable layer normalization.
DataType
Definition: Types.hpp:32
float m_NmsIouThreshold
Intersection over union threshold.
TransposeDescriptor(const PermutationVector &dimMappings)
An LstmDescriptor for the LstmLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
PadDescriptor(const std::vector< std::pair< unsigned int, unsigned int >> &padList, const float &padValue=0)
std::vector< unsigned int > m_Begin
Beginning indices of the slice in each dimension.
int32_t m_NewAxisMask
New axis mask value.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
std::vector< unsigned int > m_BlockShape
Block shape values.
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f...
A L2NormalizationDescriptor for the L2NormalizationLayer.
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:51
An OriginsDescriptor for the ConcatLayer.
float m_ProjectionClip
Clipping threshold value for the projection.
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
bool m_BiasEnabled
Enable/disable bias.
float m_InputIntermediateScale
Input intermediate quantization scale.
A FakeQuantizationDescriptor for the FakeQuantizationLayer.
uint32_t m_TargetWidth
Target width value.
A GatherDescriptor for the GatherLayer.
bool m_PeepholeEnabled
Enable/disable peephole.
Status
enumeration
Definition: Types.hpp:26
uint32_t m_NumClasses
Number of classes.
bool m_HalfPixelCenters
Half Pixel Centers.
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
A QLstmDescriptor for the QLstmLayer.
bool m_UseRegularNms
Use Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
SpaceToBatchNdDescriptor(const std::vector< unsigned int > &blockShape, const std::vector< std::pair< unsigned int, unsigned int >> &padList)
uint32_t m_TargetHeight
Target height value.
uint32_t m_NumInputs
Number of input tensors.
uint32_t m_TargetHeight
Target height value.
uint32_t m_ActivationFunc
The activation function to use.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
float m_ClippingThresCell
Clipping threshold value for the cell state.
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0. ...
float m_ScaleH
Center size encoding scale height.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:89
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
OutputShapeRounding
Definition: Types.hpp:155
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
float m_CellClip
Clipping threshold value for the cell state.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:45
uint32_t m_DilationX
Dilation along x axis.
FillDescriptor(const float &value)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
uint32_t m_PadLeft
Padding left value in the width dimension.
bool m_AlignCorners
Aligned corners.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int32_t m_Axis
The axis in params to gather indices from.
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:93
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
bool m_ProjectionEnabled
Enable/disable the projection layer.
ArgMinMaxFunction
Definition: Types.hpp:72
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
uint32_t m_NumInputs
Number of input tensors.
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
ResizeMethod
Definition: Types.hpp:111
A MeanDescriptor for the MeanLayer.
UnaryOperation
Definition: Types.hpp:94
bool m_LayerNormEnabled
Enable/disable layer normalization.
uint32_t m_PadRight
Padding right value in the width dimension.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:67
float m_ScaleY
Center size encoding scale y.
#define ARMNN_DEPRECATED_MSG(message)
Definition: Deprecated.hpp:43
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
float m_NmsScoreThreshold
NMS score threshold.
A PreCompiledDescriptor for the PreCompiledLayer.
GatherDescriptor(int32_t axis)
Krichevsky 2012: Local Brightness Normalization.
A Pooling2dDescriptor for the Pooling2dLayer.
A NormalizationDescriptor for the NormalizationLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
NormalizationAlgorithmMethod
Definition: Types.hpp:147
A ResizeBilinearDescriptor for the ResizeBilinearLayer.
StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape &inputShape)
ReshapeDescriptor(const TensorShape &shape)
float m_CellIntermediateScale
Cell intermediate quantization scale.
LogicalBinaryDescriptor(LogicalBinaryOperation operation)
DetectionPostProcessDescriptor()
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:47
A SoftmaxDescriptor for the SoftmaxLayer.
float m_Beta
Beta value for the normalization equation.
DataLayout::NCHW DataLayout::NCHW DataLayout::NHWC DataLayout::NHWC true
BatchToSpaceNdDescriptor(std::vector< unsigned int > blockShape, std::vector< std::pair< unsigned int, unsigned int >> crops)
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts e.g.
uint32_t m_NormSize
Depth radius value.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:43
armnn::DataType m_Output_Type
Definition: Descriptors.hpp:69
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
A FillDescriptor for the FillLayer.
A BatchNormalizationDescriptor for the BatchNormalizationLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
ActivationFunction
Definition: Types.hpp:56
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
A PermuteDescriptor for the PermuteLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
int32_t m_HiddenStateZeroPoint
Hidden State zero point.