ArmNN 21.05
Descriptors.hpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "Deprecated.hpp"
8 #include "DescriptorsFwd.hpp"
9 
10 #include <cstdint>
11 #include <initializer_list>
12 #include <iostream>
13 #include <sstream>
14 
15 #include "Tensor.hpp"
16 #include "Types.hpp"
17 
18 namespace armnn
19 {
20 
21 /// Base class for all descriptors.
22 struct BaseDescriptor {};
23 
24 /// An ActivationDescriptor for the ActivationLayer.
25 struct ActivationDescriptor : BaseDescriptor
26 {
27  ActivationDescriptor()
28  : m_Function(ActivationFunction::Sigmoid)
29  , m_A(0)
30  , m_B(0)
31  {}
32 
33  ActivationDescriptor(armnn::ActivationFunction activation,
34  float a = 0,
35  float b = 0)
36  : m_Function(activation)
37  , m_A(a)
38  , m_B(b)
39  {}
40 
41  bool operator ==(const ActivationDescriptor &rhs) const
42  {
43  return m_Function == rhs.m_Function && m_A == rhs.m_A && m_B == rhs.m_B;
44  }
45 
46  /// @brief The activation function to use
47  /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
48  ActivationFunction m_Function;
49  /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
50  float m_A;
51  /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
52  float m_B;
53 };
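// Editorial usage sketch (not part of the original header): a minimal example of
// configuring an ActivationDescriptor for a bounded ReLU ("ReLU6"). For BoundedReLu,
// m_A is the upper bound and m_B the lower bound, as documented above. The function
// name is illustrative only.
inline ActivationDescriptor MakeBoundedReLuDescriptorExample()
{
    return ActivationDescriptor(ActivationFunction::BoundedReLu, 6.0f, 0.0f);
}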
54 
55 /// An ArgMinMaxDescriptor for ArgMinMaxLayer
56 struct ArgMinMaxDescriptor : BaseDescriptor
57 {
58  ArgMinMaxDescriptor()
59  : m_Function(ArgMinMaxFunction::Min)
60  , m_Axis(-1)
61  , m_Output_Type(armnn::DataType::Signed32)
62  {}
63 
64  bool operator ==(const ArgMinMaxDescriptor &rhs) const
65  {
66  return m_Function == rhs.m_Function && m_Axis == rhs.m_Axis && m_Output_Type == rhs.m_Output_Type;
67  }
68 
69  /// Specify if the function is to find Min or Max.
70  ArgMinMaxFunction m_Function;
71  /// Axis to reduce across the input tensor.
72  int m_Axis;
73  /// Deprecated and will be removed in future release.
74  armnn::DataType m_Output_Type;
75 };
76 
77 /// A ComparisonDescriptor for the ComparisonLayer
78 struct ComparisonDescriptor : BaseDescriptor
79 {
80  ComparisonDescriptor()
81  : m_Operation(ComparisonOperation::Equal)
82  {}
83 
84  ComparisonDescriptor(ComparisonOperation operation)
85  : m_Operation(operation)
86  {}
87 
88  bool operator ==(const ComparisonDescriptor &rhs) const
89  {
90  return m_Operation == rhs.m_Operation;
91  }
92 
93  /// Specifies the comparison operation to execute
94  ComparisonOperation m_Operation;
95 };
96 
97 /// A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer
98 struct ElementwiseUnaryDescriptor : BaseDescriptor
99 {
100  ElementwiseUnaryDescriptor()
101  : m_Operation(UnaryOperation::Abs)
102  {}
103 
104  ElementwiseUnaryDescriptor(UnaryOperation operation)
105  : m_Operation(operation)
106  {}
107 
108  bool operator ==(const ElementwiseUnaryDescriptor &rhs) const
109  {
110  return m_Operation == rhs.m_Operation;
111  }
112 
113  /// Specifies the elementwiseUnary operation to execute
114  UnaryOperation m_Operation;
115 };
116 
117 /// A PermuteDescriptor for the PermuteLayer.
118 struct PermuteDescriptor : BaseDescriptor
119 {
120  PermuteDescriptor()
121  : m_DimMappings{}
122  {}
123 
124  PermuteDescriptor(const PermutationVector& dimMappings)
125  : m_DimMappings(dimMappings)
126  {}
127 
128  bool operator ==(const PermuteDescriptor &rhs) const
129  {
130  return m_DimMappings.IsEqual(rhs.m_DimMappings);
131  }
132 
133  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
134  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
135  PermutationVector m_DimMappings;
136 };
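// Editorial usage sketch (not part of the original header): constructing a
// PermuteDescriptor from the mapping quoted in the comment above. Each entry of
// m_DimMappings names the destination dimension for the corresponding source
// dimension, in the direction documented for PermutationVector. The function name
// is illustrative only.
inline PermuteDescriptor MakePermuteDescriptorExample()
{
    return PermuteDescriptor(PermutationVector({ 0U, 3U, 1U, 2U }));
}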
137 
138 /// A SoftmaxDescriptor for the SoftmaxLayer.
139 struct SoftmaxDescriptor : BaseDescriptor
140 {
141  SoftmaxDescriptor()
142  : m_Beta(1.0f)
143  , m_Axis(-1)
144  {}
145 
146  bool operator ==(const SoftmaxDescriptor& rhs) const
147  {
148  return m_Beta == rhs.m_Beta && m_Axis == rhs.m_Axis;
149  }
150 
151  /// Exponentiation value.
152  float m_Beta;
153  /// Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
154  int m_Axis;
155 };
156 
157 /// A LogSoftmaxDescriptor for the LogSoftmaxLayer
158 using LogSoftmaxDescriptor = SoftmaxDescriptor;
159 
160 /// @brief An OriginsDescriptor for the ConcatLayer.
161 /// Descriptor to configure the concatenation process. Number of views must be equal to the number of inputs, and
162 /// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
163 struct OriginsDescriptor : BaseDescriptor
164 {
165  OriginsDescriptor();
166  OriginsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
167  OriginsDescriptor(const OriginsDescriptor& other);
168  OriginsDescriptor(OriginsDescriptor&& other);
169 
170  ~OriginsDescriptor();
171 
172  OriginsDescriptor& operator=(OriginsDescriptor rhs);
173 
174  bool operator ==(const OriginsDescriptor& rhs) const;
175 
176  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
177  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
178  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
179  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
180  /// Get the number of views.
181  uint32_t GetNumViews() const;
182  /// Get the number of dimensions.
183  uint32_t GetNumDimensions() const;
184  /// Return the view origin at the int value idx.
185  const uint32_t* GetViewOrigin(uint32_t idx) const;
186  /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
187  /// The number of views must match number of elements in the new ordering array.
188  void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
189  /// Swap the ViewsDescriptor values first and second.
190  friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
191  /// Set the concatenation axis value.
192  void SetConcatAxis(unsigned int concatAxis);
193  /// Get the concatenation axis value.
194  unsigned int GetConcatAxis() const;
195 
196 private:
197  unsigned int m_ConcatAxis;
198  uint32_t m_NumViews;
199  uint32_t m_NumDimensions;
200  uint32_t** m_ViewOrigins;
201 };
202 
203 /// @brief A ViewsDescriptor for the SplitterLayer.
204 /// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
205 /// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
206 struct ViewsDescriptor : BaseDescriptor
207 {
208  ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
209  ViewsDescriptor(const ViewsDescriptor& other);
210  ViewsDescriptor();
211  ViewsDescriptor(ViewsDescriptor&& other);
212 
213  ~ViewsDescriptor();
214 
215  ViewsDescriptor& operator=(ViewsDescriptor rhs);
216 
217  bool operator ==(const ViewsDescriptor& rhs) const;
218 
219  /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
220  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
221  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
222  Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
223  /// @brief Set the size of the views. The arguments are: view, dimension, value.
224  /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
225  /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
226  Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
227 
228  /// Get the number of views.
229  uint32_t GetNumViews() const;
230  /// Get the number of dimensions.
231  uint32_t GetNumDimensions() const;
232  /// Get the view origin at the int value idx.
233  const uint32_t* GetViewOrigin(uint32_t idx) const;
234  /// Get the view sizes at the int value idx.
235  const uint32_t* GetViewSizes(uint32_t idx) const;
236  /// Get the View Origins
237  const OriginsDescriptor& GetOrigins() const;
238 
239  /// Swap the ViewsDescriptor value first and second.
240  friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
241 private:
242  OriginsDescriptor m_Origins;
243  uint32_t** m_ViewSizes;
244 };
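// Editorial usage sketch (not part of the original header): a ViewsDescriptor that
// splits a [1, 2*2, 6, 6] tensor into two [1, 2, 6, 6] halves along dimension 1.
// The shapes and the function name are illustrative only.
inline ViewsDescriptor MakeTwoWaySplitExample()
{
    ViewsDescriptor views(2, 4);  // two output views over a 4-dimensional input
    for (uint32_t view = 0; view < 2; ++view)
    {
        // Each view is 1x2x6x6; the second view starts at offset 2 in dimension 1.
        views.SetViewOriginCoord(view, 0, 0);
        views.SetViewOriginCoord(view, 1, view * 2);
        views.SetViewOriginCoord(view, 2, 0);
        views.SetViewOriginCoord(view, 3, 0);
        views.SetViewSize(view, 0, 1);
        views.SetViewSize(view, 1, 2);
        views.SetViewSize(view, 2, 6);
        views.SetViewSize(view, 3, 6);
    }
    return views;
}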
245 
246 template <typename TensorShapeIt>
247 ARMNN_DEPRECATED_MSG("Use CreateDescriptorForConcatenation instead")
248 OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first,
249  TensorShapeIt last,
250  unsigned int concatenationDimension)
251 {
252  return CreateDescriptorForConcatenation(first, last, concatenationDimension);
253 }
254 
255 /// @brief Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing
256 /// concatenation of a number of input tensors.
257 template <typename TensorShapeIt>
258 OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first,
259  TensorShapeIt last,
260  unsigned int concatenationDimension)
261 {
262  auto numInputs = std::distance(first, last);
263 
264  if (numInputs < 2)
265  {
266  throw InvalidArgumentException("Concatenation requires at least 2 inputs");
267  }
268 
269  const auto& firstInputShape = *first;
270 
271  const unsigned int numDimensions = firstInputShape.GetNumDimensions();
272  for (auto it = first + 1; it != last; ++it)
273  {
274  if (it->GetNumDimensions() != numDimensions)
275  {
276  throw InvalidArgumentException("All inputs to concatenation must have the same number of dimensions");
277  }
278  }
279 
280  if (concatenationDimension >= numDimensions)
281  {
282  throw InvalidArgumentException("concatenationDimension must be between 0 and the number of dimensions.");
283  }
284 
285  for (auto it = first; it != last; ++it)
286  {
287  for (unsigned int d = 0; d < numDimensions; ++d)
288  {
289  const bool dimSizeOk = (d == concatenationDimension) || (firstInputShape[d] == (*it)[d]);
290  if (!dimSizeOk)
291  {
292  throw InvalidArgumentException("All inputs to concatenation must be the same size along all dimensions "
293  " except the concatenation dimension");
294  }
295  }
296  }
297 
298  OriginsDescriptor viewsDescriptor(static_cast<uint32_t>(numInputs), numDimensions);
299  viewsDescriptor.SetConcatAxis(concatenationDimension);
300 
301  uint32_t viewIndex = 0u;
302  uint32_t coordAlongConcatDim = 0u;
303  for (auto it = first; it != last; ++it)
304  {
305  const auto& inputShape = *it;
306 
307  for (unsigned int i = 0; i < concatenationDimension; ++i)
308  {
309  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
310  }
311 
312  viewsDescriptor.SetViewOriginCoord(viewIndex, concatenationDimension, coordAlongConcatDim);
313  unsigned int dimSize = inputShape[concatenationDimension];
314  coordAlongConcatDim += dimSize;
315 
316 
317  for (unsigned int i = concatenationDimension + 1; i < numDimensions; ++i)
318  {
319  viewsDescriptor.SetViewOriginCoord(viewIndex, i, 0);
320  }
321 
322  ++viewIndex;
323  }
324 
325  return viewsDescriptor;
326 }
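// Editorial usage sketch (not part of the original header): creating an
// OriginsDescriptor for concatenating two NCHW tensors along the channel
// dimension (axis 1). Following the loop above, the second view is recorded with
// origin {0, 2, 0, 0}, i.e. it starts where the first input's two channels end.
// The shapes and the function name are illustrative only.
inline OriginsDescriptor MakeChannelConcatDescriptorExample()
{
    const std::vector<TensorShape> shapes = { TensorShape({ 1, 2, 4, 4 }),
                                              TensorShape({ 1, 3, 4, 4 }) };
    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);
}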
327 
328 /// A Pooling2dDescriptor for the Pooling2dLayer.
329 struct Pooling2dDescriptor : BaseDescriptor
330 {
331  Pooling2dDescriptor()
332  : m_PoolType(PoolingAlgorithm::Max)
333  , m_PadLeft(0)
334  , m_PadRight(0)
335  , m_PadTop(0)
336  , m_PadBottom(0)
337  , m_PoolWidth(0)
338  , m_PoolHeight(0)
339  , m_StrideX(0)
340  , m_StrideY(0)
341  , m_OutputShapeRounding(OutputShapeRounding::Floor)
342  , m_PaddingMethod(PaddingMethod::Exclude)
343  , m_DataLayout(DataLayout::NCHW)
344  {}
345 
346  bool operator ==(const Pooling2dDescriptor& rhs) const
347  {
348  return m_PoolType == rhs.m_PoolType &&
349  m_PadLeft == rhs.m_PadLeft &&
350  m_PadRight == rhs.m_PadRight &&
351  m_PadTop == rhs.m_PadTop &&
352  m_PadBottom == rhs.m_PadBottom &&
353  m_PoolWidth == rhs.m_PoolWidth &&
354  m_PoolHeight == rhs.m_PoolHeight &&
355  m_StrideX == rhs.m_StrideX &&
356  m_StrideY == rhs.m_StrideY &&
357  m_OutputShapeRounding == rhs.m_OutputShapeRounding &&
358  m_PaddingMethod == rhs.m_PaddingMethod &&
359  m_DataLayout == rhs.m_DataLayout;
360  }
361 
362  /// The pooling algorithm to use (Max, Average, L2).
363  PoolingAlgorithm m_PoolType;
364  /// Padding left value in the width dimension.
365  uint32_t m_PadLeft;
366  /// Padding right value in the width dimension.
367  uint32_t m_PadRight;
368  /// Padding top value in the height dimension.
369  uint32_t m_PadTop;
370  /// Padding bottom value in the height dimension.
371  uint32_t m_PadBottom;
372  /// Pooling width value.
373  uint32_t m_PoolWidth;
374  /// Pooling height value.
375  uint32_t m_PoolHeight;
376  /// Stride value when proceeding through input for the width dimension.
377  uint32_t m_StrideX;
378  /// Stride value when proceeding through input for the height dimension.
379  uint32_t m_StrideY;
380  /// The rounding method for the output shape. (Floor, Ceiling).
381  OutputShapeRounding m_OutputShapeRounding;
382  /// The padding method to be used. (Exclude, IgnoreValue).
383  PaddingMethod m_PaddingMethod;
384  /// The data layout to be used (NCHW, NHWC).
385  DataLayout m_DataLayout;
386 };
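// Editorial usage sketch (not part of the original header): a 2x2 max pooling with
// stride 2 and no padding on an NHWC tensor. The function name is illustrative only.
inline Pooling2dDescriptor MakeMaxPool2x2Example()
{
    Pooling2dDescriptor pool;
    pool.m_PoolType   = PoolingAlgorithm::Max;
    pool.m_PoolWidth  = 2;
    pool.m_PoolHeight = 2;
    pool.m_StrideX    = 2;
    pool.m_StrideY    = 2;
    pool.m_DataLayout = DataLayout::NHWC;
    return pool;
}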
387 
388 /// A FullyConnectedDescriptor for the FullyConnectedLayer.
389 struct FullyConnectedDescriptor : BaseDescriptor
390 {
391  FullyConnectedDescriptor()
392  : m_BiasEnabled(false)
393  , m_TransposeWeightMatrix(false)
394  , m_ConstantWeights(true)
395  {}
396 
397  bool operator ==(const FullyConnectedDescriptor& rhs) const
398  {
399  return m_BiasEnabled == rhs.m_BiasEnabled
400  && m_TransposeWeightMatrix == rhs.m_TransposeWeightMatrix
401  && m_ConstantWeights == rhs.m_ConstantWeights;
402  }
403 
404  /// Get the number of views/inputs.
405  uint32_t GetNumViews() const;
406 
407  /// Enable/disable bias.
408  bool m_BiasEnabled;
409  /// Enable/disable transpose weight matrix.
410  bool m_TransposeWeightMatrix;
411  /// Enable/disable constant weights and biases.
412  bool m_ConstantWeights;
413 };
414 
415 /// A Convolution2dDescriptor for the Convolution2dLayer.
416 struct Convolution2dDescriptor : BaseDescriptor
417 {
418  Convolution2dDescriptor()
419  : m_PadLeft(0)
420  , m_PadRight(0)
421  , m_PadTop(0)
422  , m_PadBottom(0)
423  , m_StrideX(1)
424  , m_StrideY(1)
425  , m_DilationX(1)
426  , m_DilationY(1)
427  , m_BiasEnabled(false)
428  , m_DataLayout(DataLayout::NCHW)
429  {}
430 
431  bool operator ==(const Convolution2dDescriptor& rhs) const
432  {
433  return m_PadLeft == rhs.m_PadLeft &&
434  m_PadRight == rhs.m_PadRight &&
435  m_PadTop == rhs.m_PadTop &&
436  m_PadBottom == rhs.m_PadBottom &&
437  m_StrideX == rhs.m_StrideX &&
438  m_StrideY == rhs.m_StrideY &&
439  m_DilationX == rhs.m_DilationX &&
440  m_DilationY == rhs.m_DilationY &&
441  m_BiasEnabled == rhs.m_BiasEnabled &&
442  m_DataLayout == rhs.m_DataLayout;
443  }
444 
445  /// Padding left value in the width dimension.
446  uint32_t m_PadLeft;
447  /// Padding right value in the width dimension.
448  uint32_t m_PadRight;
449  /// Padding top value in the height dimension.
450  uint32_t m_PadTop;
451  /// Padding bottom value in the height dimension.
452  uint32_t m_PadBottom;
453  /// Stride value when proceeding through input for the width dimension.
454  uint32_t m_StrideX;
455  /// Stride value when proceeding through input for the height dimension.
456  uint32_t m_StrideY;
457  /// Dilation along x axis
458  uint32_t m_DilationX;
459  /// Dilation along y axis
460  uint32_t m_DilationY;
461  /// Enable/disable bias.
462  bool m_BiasEnabled;
463  /// The data layout to be used (NCHW, NHWC).
464  DataLayout m_DataLayout;
465 };
466 
467 /// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
468 struct DepthwiseConvolution2dDescriptor : BaseDescriptor
469 {
470  DepthwiseConvolution2dDescriptor()
471  : m_PadLeft(0)
472  , m_PadRight(0)
473  , m_PadTop(0)
474  , m_PadBottom(0)
475  , m_StrideX(1)
476  , m_StrideY(1)
477  , m_DilationX(1)
478  , m_DilationY(1)
479  , m_BiasEnabled(false)
480  , m_DataLayout(DataLayout::NCHW)
481  {}
482 
483  bool operator ==(const DepthwiseConvolution2dDescriptor& rhs) const
484  {
485  return m_PadLeft == rhs.m_PadLeft &&
486  m_PadRight == rhs.m_PadRight &&
487  m_PadTop == rhs.m_PadTop &&
488  m_PadBottom == rhs.m_PadBottom &&
489  m_StrideX == rhs.m_StrideX &&
490  m_StrideY == rhs.m_StrideY &&
491  m_DilationX == rhs.m_DilationX &&
492  m_DilationY == rhs.m_DilationY &&
493  m_BiasEnabled == rhs.m_BiasEnabled &&
494  m_DataLayout == rhs.m_DataLayout;
495  }
496 
497  /// Padding left value in the width dimension.
498  uint32_t m_PadLeft;
499  /// Padding right value in the width dimension.
500  uint32_t m_PadRight;
501  /// Padding top value in the height dimension.
502  uint32_t m_PadTop;
503  /// Padding bottom value in the height dimension.
504  uint32_t m_PadBottom;
505  /// Stride value when proceeding through input for the width dimension.
506  uint32_t m_StrideX;
507  /// Stride value when proceeding through input for the height dimension.
508  uint32_t m_StrideY;
509  /// Dilation factor value for width dimension.
510  uint32_t m_DilationX;
511  /// Dilation factor value for height dimension.
512  uint32_t m_DilationY;
513  /// Enable/disable bias.
514  bool m_BiasEnabled;
515  /// The data layout to be used (NCHW, NHWC).
516  DataLayout m_DataLayout;
517 };
518 
519 struct DetectionPostProcessDescriptor : BaseDescriptor
520 {
521  DetectionPostProcessDescriptor()
522  : m_MaxDetections(0)
523  , m_MaxClassesPerDetection(1)
524  , m_DetectionsPerClass(1)
525  , m_NmsScoreThreshold(0)
526  , m_NmsIouThreshold(0)
527  , m_NumClasses(0)
528  , m_UseRegularNms(false)
529  , m_ScaleX(0)
530  , m_ScaleY(0)
531  , m_ScaleW(0)
532  , m_ScaleH(0)
533  {}
534 
535  bool operator ==(const DetectionPostProcessDescriptor& rhs) const
536  {
537  return m_MaxDetections == rhs.m_MaxDetections &&
538  m_MaxClassesPerDetection == rhs.m_MaxClassesPerDetection &&
539  m_DetectionsPerClass == rhs.m_DetectionsPerClass &&
540  m_NmsScoreThreshold == rhs.m_NmsScoreThreshold &&
541  m_NmsIouThreshold == rhs.m_NmsIouThreshold &&
542  m_NumClasses == rhs.m_NumClasses &&
543  m_UseRegularNms == rhs.m_UseRegularNms &&
544  m_ScaleX == rhs.m_ScaleX &&
545  m_ScaleY == rhs.m_ScaleY &&
546  m_ScaleW == rhs.m_ScaleW &&
547  m_ScaleH == rhs.m_ScaleH;
548  }
549 
550  /// Maximum numbers of detections.
551  uint32_t m_MaxDetections;
552  /// Maximum numbers of classes per detection, used in Fast NMS.
553  uint32_t m_MaxClassesPerDetection;
554  /// Detections per classes, used in Regular NMS.
555  uint32_t m_DetectionsPerClass;
556  /// NMS score threshold.
557  float m_NmsScoreThreshold;
558  /// Intersection over union threshold.
559  float m_NmsIouThreshold;
560  /// Number of classes.
561  uint32_t m_NumClasses;
562  /// Use Regular NMS.
563  bool m_UseRegularNms;
564  /// Center size encoding scale x.
565  float m_ScaleX;
566  /// Center size encoding scale y.
567  float m_ScaleY;
568  /// Center size encoding scale weight.
569  float m_ScaleW;
570  /// Center size encoding scale height.
571  float m_ScaleH;
572 };
573 
574 /// A NormalizationDescriptor for the NormalizationLayer.
575 struct NormalizationDescriptor : BaseDescriptor
576 {
577  NormalizationDescriptor()
578  : m_NormChannelType(NormalizationAlgorithmChannel::Across)
579  , m_NormMethodType(NormalizationAlgorithmMethod::LocalBrightness)
580  , m_NormSize(0)
581  , m_Alpha(0.f)
582  , m_Beta(0.f)
583  , m_K(0.f)
584  , m_DataLayout(DataLayout::NCHW)
585  {}
586 
587  bool operator ==(const NormalizationDescriptor& rhs) const
588  {
589  return m_NormChannelType == rhs.m_NormChannelType &&
590  m_NormMethodType == rhs.m_NormMethodType &&
591  m_NormSize == rhs.m_NormSize &&
592  m_Alpha == rhs.m_Alpha &&
593  m_Beta == rhs.m_Beta &&
594  m_K == rhs.m_K &&
595  m_DataLayout == rhs.m_DataLayout;
596  }
597 
598  /// Normalization channel algorithm to use (Across, Within).
599  NormalizationAlgorithmChannel m_NormChannelType;
600  /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
601  NormalizationAlgorithmMethod m_NormMethodType;
602  /// Depth radius value.
603  uint32_t m_NormSize;
604  /// Alpha value for the normalization equation.
605  float m_Alpha;
606  /// Beta value for the normalization equation.
607  float m_Beta;
608  /// Kappa value used for the across channel normalization equation.
609  float m_K;
610  /// The data layout to be used (NCHW, NHWC).
611  DataLayout m_DataLayout;
612 };
613 
614 /// A L2NormalizationDescriptor for the L2NormalizationLayer.
615 struct L2NormalizationDescriptor : BaseDescriptor
616 {
617  L2NormalizationDescriptor()
618  : m_Eps(1e-12f)
619  , m_DataLayout(DataLayout::NCHW)
620  {}
621 
622  bool operator ==(const L2NormalizationDescriptor& rhs) const
623  {
624  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
625  }
626 
627  /// Used to avoid dividing by zero.
628  float m_Eps;
629  /// The data layout to be used (NCHW, NHWC).
630  DataLayout m_DataLayout;
631 };
632 
633 /// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
634 struct BatchNormalizationDescriptor : BaseDescriptor
635 {
636  BatchNormalizationDescriptor()
637  : m_Eps(0.0001f)
638  , m_DataLayout(DataLayout::NCHW)
639  {}
640 
641  bool operator ==(const BatchNormalizationDescriptor& rhs) const
642  {
643  return m_Eps == rhs.m_Eps && m_DataLayout == rhs.m_DataLayout;
644  }
645 
646  /// Value to add to the variance. Used to avoid dividing by zero.
647  float m_Eps;
648  /// The data layout to be used (NCHW, NHWC).
649  DataLayout m_DataLayout;
650 };
651 
652 /// An InstanceNormalizationDescriptor for InstanceNormalizationLayer
653 struct InstanceNormalizationDescriptor : BaseDescriptor
654 {
655  InstanceNormalizationDescriptor()
656  : m_Gamma(1.0f)
657  , m_Beta(0.0f)
658  , m_Eps(1e-12f)
659  , m_DataLayout(DataLayout::NCHW)
660  {}
661 
662  bool operator ==(const InstanceNormalizationDescriptor& rhs) const
663  {
664  return m_Gamma == rhs.m_Gamma &&
665  m_Beta == rhs.m_Beta &&
666  m_Eps == rhs.m_Eps &&
667  m_DataLayout == rhs.m_DataLayout;
668  }
669 
670  /// Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
671  float m_Gamma;
672  /// Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
673  float m_Beta;
674  /// Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
675  float m_Eps;
676  /// The data layout to be used (NCHW, NHWC).
677  DataLayout m_DataLayout;
678 };
679 
680 /// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
681 struct BatchToSpaceNdDescriptor : BaseDescriptor
682 {
683  BatchToSpaceNdDescriptor()
684  : m_BlockShape({1, 1})
685  , m_Crops({{0, 0}, {0, 0}})
686  , m_DataLayout(DataLayout::NCHW)
687  {}
688 
689  BatchToSpaceNdDescriptor(std::vector<unsigned int> blockShape,
690  std::vector<std::pair<unsigned int, unsigned int>> crops)
691  : m_BlockShape(blockShape)
692  , m_Crops(crops)
693  , m_DataLayout(DataLayout::NCHW)
694  {}
695 
696  bool operator ==(const BatchToSpaceNdDescriptor& rhs) const
697  {
698  return m_BlockShape == rhs.m_BlockShape &&
699  m_Crops == rhs.m_Crops &&
700  m_DataLayout == rhs.m_DataLayout;
701  }
702 
703  /// Block shape values.
704  std::vector<unsigned int> m_BlockShape;
705  /// The values to crop from the input dimension.
706  std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
707  /// The data layout to be used (NCHW, NHWC).
708  DataLayout m_DataLayout;
709 };
710 
711 /// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
712 struct FakeQuantizationDescriptor : BaseDescriptor
713 {
714  FakeQuantizationDescriptor()
715  : m_Min(-6.0f)
716  , m_Max(6.0f)
717  {}
718 
719  bool operator ==(const FakeQuantizationDescriptor& rhs) const
720  {
721  return m_Min == rhs.m_Min && m_Max == rhs.m_Max;
722  }
723 
724  /// Minimum value.
725  float m_Min;
726  /// Maximum value.
727  float m_Max;
728 };
729 
730 /// A FillDescriptor for the FillLayer
731 struct FillDescriptor : BaseDescriptor
732 {
733  FillDescriptor()
734  : m_Value(0)
735  {}
736 
737  FillDescriptor(const float& value)
738  : m_Value(value)
739  {}
740 
741  bool operator ==(const FillDescriptor& rhs) const
742  {
743  return m_Value == rhs.m_Value;
744  }
745 
746  float m_Value;
747 };
748 
749 /// A GatherDescriptor for the GatherLayer.
750 struct GatherDescriptor : BaseDescriptor
751 {
752  GatherDescriptor()
753  : m_Axis(0)
754  {}
755 
756  GatherDescriptor(int32_t axis)
757  : m_Axis(axis)
758  {}
759 
760  bool operator ==(const GatherDescriptor& rhs) const
761  {
762  return m_Axis == rhs.m_Axis;
763  }
764 
765  /// The axis in params to gather indices from
766  int32_t m_Axis;
767 };
768 
769 /// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
770 struct ResizeBilinearDescriptor : BaseDescriptor
771 {
772  ResizeBilinearDescriptor()
773  : m_TargetWidth(0)
774  , m_TargetHeight(0)
775  , m_DataLayout(DataLayout::NCHW)
776  , m_AlignCorners(false)
777  , m_HalfPixelCenters(false)
778  {}
779 
780  bool operator ==(const ResizeBilinearDescriptor& rhs) const
781  {
782  return m_TargetWidth == rhs.m_TargetWidth &&
783  m_TargetHeight == rhs.m_TargetHeight &&
784  m_DataLayout == rhs.m_DataLayout &&
785  m_AlignCorners == rhs.m_AlignCorners &&
786  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
787  }
788 
789  /// Target width value.
790  uint32_t m_TargetWidth;
791  /// Target height value.
792  uint32_t m_TargetHeight;
793  /// The data layout to be used (NCHW, NHWC).
794  DataLayout m_DataLayout;
795  /// Aligned corners.
796  bool m_AlignCorners;
797  /// Half Pixel Centers.
798  bool m_HalfPixelCenters;
799 };
800 
801 /// A ResizeDescriptor for the ResizeLayer.
802 struct ResizeDescriptor : BaseDescriptor
803 {
804  ResizeDescriptor()
805  : m_TargetWidth(0)
806  , m_TargetHeight(0)
807  , m_Method(ResizeMethod::NearestNeighbor)
808  , m_DataLayout(DataLayout::NCHW)
809  , m_AlignCorners(false)
810  , m_HalfPixelCenters(false)
811  {}
812 
813  bool operator ==(const ResizeDescriptor& rhs) const
814  {
815  return m_TargetWidth == rhs.m_TargetWidth &&
816  m_TargetHeight == rhs.m_TargetHeight &&
817  m_Method == rhs.m_Method &&
818  m_DataLayout == rhs.m_DataLayout &&
819  m_AlignCorners == rhs.m_AlignCorners &&
820  m_HalfPixelCenters == rhs.m_HalfPixelCenters;
821  }
822 
823  /// Target width value.
824  uint32_t m_TargetWidth;
825  /// Target height value.
826  uint32_t m_TargetHeight;
827  /// The Interpolation method to use
828  /// (Bilinear, NearestNeighbor).
829  ResizeMethod m_Method;
830  /// The data layout to be used (NCHW, NHWC).
831  DataLayout m_DataLayout;
832  /// Aligned corners.
833  bool m_AlignCorners;
834  /// Half Pixel Centers.
835  bool m_HalfPixelCenters;
836 };
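// Editorial usage sketch (not part of the original header): bilinear resize of an
// NHWC tensor to 224x224. The target size and function name are illustrative only.
inline ResizeDescriptor MakeBilinearResizeExample()
{
    ResizeDescriptor resize;
    resize.m_TargetWidth  = 224;
    resize.m_TargetHeight = 224;
    resize.m_Method       = ResizeMethod::Bilinear;
    resize.m_DataLayout   = DataLayout::NHWC;
    return resize;
}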
837 
838 
839 /// A ReshapeDescriptor for the ReshapeLayer.
840 struct ReshapeDescriptor : BaseDescriptor
841 {
842  ReshapeDescriptor()
843  : m_TargetShape()
844  {}
845 
846  ReshapeDescriptor(const TensorShape& shape)
847  : m_TargetShape(shape)
848  {}
849 
850  bool operator ==(const ReshapeDescriptor& rhs) const
851  {
852  return m_TargetShape == rhs.m_TargetShape;
853  }
854 
855  /// Target shape value.
856  TensorShape m_TargetShape;
857 };
858 
859 /// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
860 struct SpaceToBatchNdDescriptor : BaseDescriptor
861 {
862  SpaceToBatchNdDescriptor()
863  : m_BlockShape({1, 1})
864  , m_PadList({{0, 0}, {0, 0}})
865  , m_DataLayout(DataLayout::NCHW)
866  {}
867 
868  SpaceToBatchNdDescriptor(const std::vector<unsigned int>& blockShape,
869  const std::vector<std::pair<unsigned int, unsigned int>>& padList)
870  : m_BlockShape(blockShape)
871  , m_PadList(padList)
872  , m_DataLayout(DataLayout::NCHW)
873  {}
874 
875  bool operator ==(const SpaceToBatchNdDescriptor& rhs) const
876  {
877  return m_BlockShape == rhs.m_BlockShape &&
878  m_PadList == rhs.m_PadList &&
879  m_DataLayout == rhs.m_DataLayout;
880  }
881 
882  /// Block shape value.
883  std::vector<unsigned int> m_BlockShape;
884  /// @brief Specifies the padding values for the input dimension:
885  /// heightPad{top, bottom} widthPad{left, right}.
886  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
887  /// The data layout to be used (NCHW, NHWC).
888  DataLayout m_DataLayout;
889 };
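// Editorial usage sketch (not part of the original header): a SpaceToBatchNd
// configuration with a 2x2 block and one element of padding after each spatial
// dimension, following the m_PadList convention documented above
// (heightPad{top, bottom}, widthPad{left, right}). The function name is
// illustrative only.
inline SpaceToBatchNdDescriptor MakeSpaceToBatchExample()
{
    return SpaceToBatchNdDescriptor({ 2, 2 },                  // block shape: height, width
                                    { { 0, 1 }, { 0, 1 } });   // heightPad, widthPad
}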
890 
891 /// A SpaceToDepthDescriptor for the SpaceToDepthLayer
892 struct SpaceToDepthDescriptor : BaseDescriptor
893 {
894  SpaceToDepthDescriptor()
895  : SpaceToDepthDescriptor(1u, DataLayout::NHWC)
896  {}
897 
898  SpaceToDepthDescriptor(unsigned int blockSize, DataLayout dataLayout)
899  : m_BlockSize(blockSize)
900  , m_DataLayout(dataLayout)
901  {}
902 
903  bool operator ==(const SpaceToDepthDescriptor& rhs) const
904  {
905  return m_BlockSize == rhs.m_BlockSize && m_DataLayout == rhs.m_DataLayout;
906  }
907 
908  /// Scalar specifying the input block size. It must be >= 1
909  unsigned int m_BlockSize;
910 
911  /// The data layout to be used (NCHW, NHWC).
912  DataLayout m_DataLayout;
913 };
914 
915 /// A DepthToSpaceDescriptor for the DepthToSpaceLayer
916 using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
917 
918 /// An LstmDescriptor for the LstmLayer.
919 struct LstmDescriptor : BaseDescriptor
920 {
921  LstmDescriptor()
922  : m_ActivationFunc(1) // 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid
923  , m_ClippingThresCell(0.0)
924  , m_ClippingThresProj(0.0)
925  , m_CifgEnabled(true)
926  , m_PeepholeEnabled(false)
927  , m_ProjectionEnabled(false)
928  , m_LayerNormEnabled(false)
929  {}
930 
931  bool operator ==(const LstmDescriptor& rhs) const
932  {
933  return m_ActivationFunc == rhs.m_ActivationFunc &&
934  m_ClippingThresCell == rhs.m_ClippingThresCell &&
935  m_ClippingThresProj == rhs.m_ClippingThresProj &&
936  m_CifgEnabled == rhs.m_CifgEnabled &&
937  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
938  m_LayerNormEnabled == rhs.m_LayerNormEnabled;
939  }
940 
941  /// @brief The activation function to use.
942  /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
943  uint32_t m_ActivationFunc;
944  /// Clipping threshold value for the cell state.
945  float m_ClippingThresCell;
946  /// Clipping threshold value for the projection.
947  float m_ClippingThresProj;
948  /// Enable/disable cifg (coupled input & forget gate).
949  bool m_CifgEnabled;
950  /// Enable/disable peephole.
951  bool m_PeepholeEnabled;
952  /// Enable/disable the projection layer.
953  bool m_ProjectionEnabled;
954  /// Enable/disable layer normalization.
955  bool m_LayerNormEnabled;
956 };
957 
958 /// A MeanDescriptor for the MeanLayer.
959 struct MeanDescriptor : BaseDescriptor
960 {
961  MeanDescriptor()
962  : m_Axis()
963  , m_KeepDims(false)
964  {}
965 
966  MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
967  : m_Axis(axis)
968  , m_KeepDims(keepDims)
969  {}
970 
971  bool operator ==(const MeanDescriptor& rhs) const
972  {
973  return m_Axis == rhs.m_Axis && m_KeepDims == rhs.m_KeepDims;
974  }
975 
976  /// Values for the dimensions to reduce.
977  std::vector<unsigned int> m_Axis;
978  /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
979  bool m_KeepDims;
980 };
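// Editorial usage sketch (not part of the original header): averaging an NHWC tensor
// over its spatial dimensions (1 and 2) while keeping them as size-1 dimensions in
// the output. The function name is illustrative only.
inline MeanDescriptor MakeSpatialMeanExample()
{
    return MeanDescriptor(std::vector<unsigned int>{ 1u, 2u }, true);
}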
981 
982 /// A PadDescriptor for the PadLayer.
983 struct PadDescriptor : BaseDescriptor
984 {
985  PadDescriptor() : m_PadValue(0)
986  {}
987 
988  PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
989  : m_PadList(padList)
990  , m_PadValue(padValue)
991  {}
992 
993  bool operator ==(const PadDescriptor& rhs) const
994  {
995  return m_PadList == rhs.m_PadList && m_PadValue == rhs.m_PadValue;
996  }
997 
998  /// @brief Specifies the padding for input dimension.
999  /// First is the number of values to add before the tensor in the dimension.
1000  /// Second is the number of values to add after the tensor in the dimension.
1001  /// The number of pairs should match the number of dimensions in the input tensor.
1002  std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
1003 
1004  /// Optional value to use for padding, defaults to 0
1005  float m_PadValue;
1006 };
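// Editorial usage sketch (not part of the original header): padding a rank-4 NCHW
// tensor with one row above/below and two columns left/right of the height and width
// dimensions, using zero as the pad value. One {before, after} pair is given per
// input dimension, as documented above. The function name is illustrative only.
inline PadDescriptor MakeSpatialPadExample()
{
    return PadDescriptor({ { 0, 0 }, { 0, 0 }, { 1, 1 }, { 2, 2 } }, 0.0f);
}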
1007 
1008 /// A SliceDescriptor for the SliceLayer.
1009 struct SliceDescriptor : BaseDescriptor
1010 {
1011  SliceDescriptor(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
1012  : m_Begin(begin)
1013  , m_Size(size)
1014  {}
1015 
1016  SliceDescriptor() : m_Begin(), m_Size()
1017  {}
1018 
1019  bool operator ==(const SliceDescriptor& rhs) const
1020  {
1021  return m_Begin == rhs.m_Begin && m_Size == rhs.m_Size;
1022  }
1023 
1024  /// Beginning indices of the slice in each dimension.
1025  std::vector<unsigned int> m_Begin;
1026 
1027  /// Size of the slice in each dimension.
1028  std::vector<unsigned int> m_Size;
1029 };
1030 
1031 /// A StackDescriptor for the StackLayer.
1032 struct StackDescriptor : BaseDescriptor
1033 {
1034  StackDescriptor()
1035  : m_Axis(0)
1036  , m_NumInputs(0)
1037  , m_InputShape()
1038  {}
1039 
1040  StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
1041  : m_Axis(axis)
1042  , m_NumInputs(numInputs)
1043  , m_InputShape(inputShape)
1044  {}
1045 
1046  bool operator ==(const StackDescriptor& rhs) const
1047  {
1048  return m_Axis == rhs.m_Axis &&
1049  m_NumInputs == rhs.m_NumInputs &&
1050  m_InputShape == rhs.m_InputShape;
1051  }
1052 
1053  /// 0-based axis along which to stack the input tensors.
1054  uint32_t m_Axis;
1055  /// Number of input tensors.
1056  uint32_t m_NumInputs;
1057  /// Required shape of all input tensors.
1058  TensorShape m_InputShape;
1059 };
1060 
1061 /// A StandInDescriptor for the StandIn layer
1062 struct StandInDescriptor : BaseDescriptor
1063 {
1064  StandInDescriptor() {};
1065 
1066  StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
1067  : m_NumInputs(numInputs)
1068  , m_NumOutputs(numOutputs)
1069  {}
1070 
1071  bool operator ==(const StandInDescriptor& rhs) const
1072  {
1073  return m_NumInputs == rhs.m_NumInputs &&
1074  m_NumOutputs == rhs.m_NumOutputs;
1075  }
1076 
1077  /// Number of input tensors
1078  uint32_t m_NumInputs = 0;
1079  /// Number of output tensors
1080  uint32_t m_NumOutputs = 0;
1081 };
1082 
1083 /// A StridedSliceDescriptor for the StridedSliceLayer.
1084 struct StridedSliceDescriptor : BaseDescriptor
1085 {
1086  StridedSliceDescriptor(const std::vector<int>& begin,
1087  const std::vector<int>& end,
1088  const std::vector<int>& stride)
1089  : m_Begin(begin)
1090  , m_End(end)
1091  , m_Stride(stride)
1092  , m_BeginMask(0)
1093  , m_EndMask(0)
1094  , m_ShrinkAxisMask(0)
1095  , m_EllipsisMask(0)
1096  , m_NewAxisMask(0)
1097  , m_DataLayout(DataLayout::NCHW)
1098  {}
1099 
1100  StridedSliceDescriptor()
1101  : StridedSliceDescriptor({}, {}, {})
1102  {}
1103 
1104  bool operator ==(const StridedSliceDescriptor& rhs) const
1105  {
1106  return m_Begin == rhs.m_Begin &&
1107  m_End == rhs.m_End &&
1108  m_Stride == rhs.m_Stride &&
1109  m_BeginMask == rhs.m_BeginMask &&
1110  m_EndMask == rhs.m_EndMask &&
1111  m_ShrinkAxisMask == rhs.m_ShrinkAxisMask &&
1112  m_EllipsisMask == rhs.m_EllipsisMask &&
1113  m_NewAxisMask == rhs.m_NewAxisMask &&
1114  m_DataLayout == rhs.m_DataLayout;
1115  }
1116 
1117  int GetStartForAxis(const TensorShape& inputShape, unsigned int axis) const;
1118  int GetStopForAxis(const TensorShape& inputShape,
1119  unsigned int axis,
1120  int startForAxis) const;
1121 
1122  /// Begin values for the input that will be sliced.
1123  std::vector<int> m_Begin;
1124  /// End values for the input that will be sliced.
1125  std::vector<int> m_End;
1126  /// Stride values for the input that will be sliced.
1127  std::vector<int> m_Stride;
1128 
1129  /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
1130  /// range is used for the dimension.
1131  int32_t m_BeginMask;
1132  /// @brief End mask value. If set, then the end is disregarded and the fullest range
1133  /// is used for the dimension.
1134  int32_t m_EndMask;
1135  /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
1136  int32_t m_ShrinkAxisMask;
1137  /// Ellipsis mask value.
1138  int32_t m_EllipsisMask;
1139  /// @brief New axis mask value. If set, the begin, end and stride is disregarded and
1140  /// a new 1 dimension is inserted to this location of the output tensor.
1141  int32_t m_NewAxisMask;
1142 
1143  /// The data layout to be used (NCHW, NHWC).
1144  DataLayout m_DataLayout;
1145 };
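// Editorial usage sketch (not part of the original header): taking every second
// element of dimension 2 of a rank-4 tensor. This assumes the usual convention that
// bit i of each mask corresponds to dimension i; with every bit set, the begin and
// end values are disregarded (full range per the comments above) and only the
// strides take effect. The function name is illustrative only.
inline StridedSliceDescriptor MakeStridedSliceExample()
{
    StridedSliceDescriptor slice({ 0, 0, 0, 0 },   // begin (ignored where masked)
                                 { 0, 0, 0, 0 },   // end   (ignored where masked)
                                 { 1, 1, 2, 1 });  // stride: every 2nd element in dim 2
    slice.m_BeginMask = 0xF;
    slice.m_EndMask   = 0xF;
    return slice;
}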
1146 
1147 /// A PreCompiledDescriptor for the PreCompiledLayer.
1148 struct PreCompiledDescriptor : BaseDescriptor
1149 {
1150  PreCompiledDescriptor(unsigned int numInputSlots = 1u, unsigned int numOutputSlots = 1u)
1151  : m_NumInputSlots(numInputSlots), m_NumOutputSlots(numOutputSlots)
1152  {}
1153 
1154  ~PreCompiledDescriptor() = default;
1155 
1156  unsigned int m_NumInputSlots;
1157  unsigned int m_NumOutputSlots;
1158 };
1159 
1160 /// A QLstmDescriptor for the QLstmLayer.
1161 struct QLstmDescriptor : BaseDescriptor
1162 {
1163  QLstmDescriptor()
1164  : m_CellClip(0.0)
1165  , m_ProjectionClip(0.0)
1166  , m_CifgEnabled(true)
1167  , m_PeepholeEnabled(false)
1168  , m_ProjectionEnabled(false)
1169  , m_LayerNormEnabled(false)
1170  , m_InputIntermediateScale(0.0)
1171  , m_ForgetIntermediateScale(0.0)
1172  , m_CellIntermediateScale(0.0)
1173  , m_OutputIntermediateScale(0.0)
1174  , m_HiddenStateZeroPoint(0)
1175  , m_HiddenStateScale(0.0)
1176  {}
1177 
1178  bool operator ==(const QLstmDescriptor& rhs) const
1179  {
1180  return m_CellClip == rhs.m_CellClip &&
1181  m_ProjectionClip == rhs.m_ProjectionClip &&
1182  m_CifgEnabled == rhs.m_CifgEnabled &&
1183  m_PeepholeEnabled == rhs.m_PeepholeEnabled &&
1184  m_ProjectionEnabled == rhs.m_ProjectionEnabled &&
1185  m_LayerNormEnabled == rhs.m_LayerNormEnabled &&
1186  m_InputIntermediateScale == rhs.m_InputIntermediateScale &&
1187  m_ForgetIntermediateScale == rhs.m_ForgetIntermediateScale &&
1188  m_CellIntermediateScale == rhs.m_CellIntermediateScale &&
1189  m_OutputIntermediateScale == rhs.m_OutputIntermediateScale &&
1190  m_HiddenStateZeroPoint == rhs.m_HiddenStateZeroPoint &&
1191  m_HiddenStateScale == rhs.m_HiddenStateScale;
1192  }
1193 
1194  /// Clipping threshold value for the cell state
1195  float m_CellClip;
1196  /// Clipping threshold value for the projection
1197  float m_ProjectionClip;
1198  /// Enable/disable CIFG (coupled input & forget gate).
1199  bool m_CifgEnabled;
1200  /// Enable/disable peephole
1201  bool m_PeepholeEnabled;
1202  /// Enable/disable the projection layer
1203  bool m_ProjectionEnabled;
1204  /// Enable/disable layer normalization
1205  bool m_LayerNormEnabled;
1206  /// Input intermediate quantization scale
1207  float m_InputIntermediateScale;
1208  /// Forget intermediate quantization scale
1209  float m_ForgetIntermediateScale;
1210  /// Cell intermediate quantization scale
1211  float m_CellIntermediateScale;
1212  /// Output intermediate quantization scale
1213  float m_OutputIntermediateScale;
1214  /// Hidden State zero point
1215  int32_t m_HiddenStateZeroPoint;
1216  /// Hidden State quantization scale
1217  float m_HiddenStateScale;
1218 };
1219 
1220 /// A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
1221 struct TransposeConvolution2dDescriptor : BaseDescriptor
1222 {
1223  TransposeConvolution2dDescriptor() :
1224  m_PadLeft(0),
1225  m_PadRight(0),
1226  m_PadTop(0),
1227  m_PadBottom(0),
1228  m_StrideX(0),
1229  m_StrideY(0),
1230  m_BiasEnabled(false),
1231  m_DataLayout(DataLayout::NCHW),
1232  m_OutputShapeEnabled(false)
1233  {}
1234 
1235  bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
1236  {
1237  return m_PadLeft == rhs.m_PadLeft &&
1238  m_PadRight == rhs.m_PadRight &&
1239  m_PadTop == rhs.m_PadTop &&
1240  m_PadBottom == rhs.m_PadBottom &&
1241  m_StrideX == rhs.m_StrideX &&
1242  m_StrideY == rhs.m_StrideY &&
1243  m_BiasEnabled == rhs.m_BiasEnabled &&
1244  m_DataLayout == rhs.m_DataLayout &&
1245  m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
1246  m_OutputShape == rhs.m_OutputShape;
1247  }
1248 
1249  /// Padding left value in the width dimension.
1250  uint32_t m_PadLeft;
1251  /// Padding right value in the width dimension.
1252  uint32_t m_PadRight;
1253  /// Padding top value in the height dimension.
1254  uint32_t m_PadTop;
1255  /// Padding bottom value in the height dimension.
1256  uint32_t m_PadBottom;
1257  /// Stride value when proceeding through input for the width dimension.
1258  uint32_t m_StrideX;
1259  /// Stride value when proceeding through input for the height dimension.
1260  uint32_t m_StrideY;
1261  /// Enable/disable bias.
1262  bool m_BiasEnabled;
1263  /// The data layout to be used (NCHW, NHWC).
1264  DataLayout m_DataLayout;
1265  /// Output shape if it has been specified.
1266  bool m_OutputShapeEnabled;
1267  std::vector<unsigned int> m_OutputShape;
1268 };
1269 
1270 /// A TransposeDescriptor for the TransposeLayer.
1271 struct TransposeDescriptor : BaseDescriptor
1272 {
1273  TransposeDescriptor()
1274  : m_DimMappings{}
1275  {}
1276 
1277  TransposeDescriptor(const PermutationVector& dimMappings)
1278  : m_DimMappings(dimMappings)
1279  {}
1280 
1281  bool operator ==(const TransposeDescriptor &rhs) const
1282  {
1283  return m_DimMappings.IsEqual(rhs.m_DimMappings);
1284  }
1285 
1286  /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
1287  /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
1288  PermutationVector m_DimMappings;
1289 };
1290 
1291 /// A LogicalBinaryDescriptor for the LogicalBinaryLayer
1292 struct LogicalBinaryDescriptor : BaseDescriptor
1293 {
1294  LogicalBinaryDescriptor()
1295  : m_Operation(LogicalBinaryOperation::LogicalAnd)
1296  {}
1297 
1298  LogicalBinaryDescriptor(LogicalBinaryOperation operation)
1299  : m_Operation(operation)
1300  {}
1301 
1302  bool operator ==(const LogicalBinaryDescriptor &rhs) const
1303  {
1304  return m_Operation == rhs.m_Operation;
1305  }
1306 
1307  /// Specifies the logical operation to execute
1308  LogicalBinaryOperation m_Operation;
1309 };
1310 
1311 /// A ReduceDescriptor for the REDUCE operators.
1312 struct ReduceDescriptor : BaseDescriptor
1313 {
1314  ReduceDescriptor()
1315  : m_KeepDims(false)
1316  , m_vAxis()
1317  , m_ReduceOperation(ReduceOperation::Sum)
1318  {}
1319 
1320  bool operator ==(const ReduceDescriptor& rhs) const
1321  {
1322  return m_KeepDims == rhs.m_KeepDims &&
1323  m_vAxis == rhs.m_vAxis &&
1324  m_ReduceOperation == rhs.m_ReduceOperation;
1325  }
1326 
1327  /// If true then output shape has no change.
1328  bool m_KeepDims;
1329  /// The indices of the dimensions to reduce.
1330  std::vector<uint32_t> m_vAxis;
1331  /// Specifies the reduction operation to execute
1332  ReduceOperation m_ReduceOperation;
1333 };
1334 
1335 } // namespace armnn