ArmNN
 20.08
ArmComputeUtils.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>

#include <arm_compute/core/Types.h>

13 namespace armnn
14 {
15 
16 inline arm_compute::NormalizationLayerInfo
18  armnn::DataLayout dataLayout)
19 {
20  unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
21  const unsigned int depth = tensorInfo.GetShape()[depthDimension];
22 
23  // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
24  // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
25  // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully chosing the normalization
26  // parameters.
27  //
28  // Please refer to both the reference implementation of the normalization layer and the implementation of
29  // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.
30 
31  // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
32  // CL: This does not result in extra kernel threads not doing any work: See usage of the RADIUS parameter in
33  // ACL's normalization_layer_cross_map() CL function.
34  const uint32_t normSize = depth * 2u + 1u;
35 
36  // See ACL's NormalizationLayerInfo::scale_coeff() definition.
37  // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
38  const float alpha = 1.0f;
39 
40  // Don't offset the reduction.
41  const float kappa = 0.0f;
42 
43  // pow(reduction, -0.5) = 1 / sqrt(reduction)
44  const float beta = 0.5f;
45 
46  return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
47 }
48 
51 {
52  using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;
53 
54  switch (armnnFunction)
55  {
56  case ActivationFunction::Linear: return AclActivationFunction::LINEAR;
57  // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
58  case ActivationFunction::Sigmoid: return AclActivationFunction::LOGISTIC;
59  case ActivationFunction::ReLu: return AclActivationFunction::RELU;
60  case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
61  case ActivationFunction::SoftReLu: return AclActivationFunction::SOFT_RELU;
62  case ActivationFunction::LeakyReLu: return AclActivationFunction::LEAKY_RELU;
63  case ActivationFunction::Abs: return AclActivationFunction::ABS;
64  case ActivationFunction::Sqrt: return AclActivationFunction::SQRT;
65  case ActivationFunction::Square: return AclActivationFunction::SQUARE;
66  case ActivationFunction::TanH: return AclActivationFunction::TANH;
67  case ActivationFunction::Elu: return AclActivationFunction::ELU;
68  case ActivationFunction::HardSwish: return AclActivationFunction::HARD_SWISH;
69  default: throw InvalidArgumentException("Unsupported activation function");
70  }
71 }
72 
73 inline arm_compute::ActivationLayerInfo
75 {
76  return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
77  actDesc.m_A, actDesc.m_B);
78 }
79 
81 {
82  switch (descriptor.m_Operation)
83  {
84  case ComparisonOperation::Greater: return arm_compute::ComparisonOperation::Greater;
85  case ComparisonOperation::GreaterOrEqual: return arm_compute::ComparisonOperation::GreaterEqual;
86  case ComparisonOperation::Less: return arm_compute::ComparisonOperation::Less;
87  case ComparisonOperation::LessOrEqual: return arm_compute::ComparisonOperation::LessEqual;
88  case ComparisonOperation::Equal: return arm_compute::ComparisonOperation::Equal;
89  case ComparisonOperation::NotEqual: return arm_compute::ComparisonOperation::NotEqual;
90  default: throw InvalidArgumentException("Unsupported comparison function");
91  }
92 }
93 
94 inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
95 {
96  using arm_compute::PoolingType;
97 
98  switch (poolingAlgorithm)
99  {
100  case PoolingAlgorithm::Max: return PoolingType::MAX;
101  case PoolingAlgorithm::Average: return PoolingType::AVG;
103  default: throw InvalidArgumentException("Unsupported pooling algorithm");
104  }
105 }
106 
108  rounding)
109 {
110  using arm_compute::DimensionRoundingType;
111 
112  switch (rounding)
113  {
114  case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
115  case OutputShapeRounding::Floor: return DimensionRoundingType::FLOOR;
116  default: throw InvalidArgumentException("Unsupported Output Shape Rounding type");
117  }
118 }
119 
120 inline arm_compute::NormType
122 {
123  using arm_compute::NormType;
124  switch (channelType)
125  {
126  case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
127  case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
128  default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
129  }
130 }
131 
132 inline arm_compute::FullyConnectedLayerInfo
134 {
135  arm_compute::FullyConnectedLayerInfo fc_info;
136  fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
137  return fc_info;
138 }
139 
140 inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
141 {
142  switch (resizeMethod)
143  {
145  return arm_compute::InterpolationPolicy::BILINEAR;
147  return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
148  default:
149  throw InvalidArgumentException("Unsupported resize method");
150  }
151 }
152 
153 template<typename T>
154 inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
155 {
156  // Detect the Android default value of -1 and return the ACL default value of 0.
157  if (softmaxDesc.m_Axis == -1)
158  {
159  return 0;
160  }
161 
162  unsigned int dim = tensor.GetNumDimensions();
163 
164  ARMNN_ASSERT(dim != 0);
165 
166  // Currently ArmNN support axis 1.
167  auto aclAxis = (static_cast<T>(dim) - 1);
168  aclAxis = aclAxis > 0 ? aclAxis -1 : aclAxis;
169 
170  return aclAxis;
171 }
172 
173 inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
174 {
175  unsigned int numSplit = desc.GetNumViews();
176  unsigned int numDimensions = desc.GetNumDimensions();
177  std::set<unsigned int> splitAxis;
178 
179  for (unsigned int i = 0; i < numSplit; ++i)
180  {
181  for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
182  {
183  if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
184  {
185  splitAxis.insert(dimIdx);
186  }
187  }
188  }
189  return splitAxis;
190 }
191 
192 /// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank)
193 inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
194 {
195  int rank = static_cast<int>(tensor.GetNumDimensions());
196 
197  ARMNN_ASSERT(rank != 0);
198  ARMNN_ASSERT((-1 * rank) <= armnnAxis);
199  ARMNN_ASSERT(armnnAxis < rank);
200 
201  int sign = (armnnAxis < 0) ? -1 : 1;
202  int aclAxis = sign * rank - 1 - armnnAxis;
203 
204  return aclAxis;
205 }
206 
207 /// Function to convert axis to its positive equivalent value.
208 /// [-rank, rank) --> [0, rank)
209 inline unsigned int ComputePositiveAxis(const int& axis, const armnn::TensorInfo& tensor)
210 {
211  int rank = static_cast<int>(tensor.GetNumDimensions());
212 
213  ARMNN_ASSERT(rank != 0);
214  ARMNN_ASSERT((-1 * rank) <= axis);
215  ARMNN_ASSERT(axis < rank);
216 
217  int positiveAxis = (axis < 0) ? rank + axis : axis;
218  return static_cast<unsigned int>(positiveAxis);
219 }
220 
221 } // namespace armnn
A ViewsDescriptor for the SplitterLayer.
arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
int ComputeAclAxis(const int &armnnAxis, const armnn::TensorInfo &tensor)
Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank...
DataLayout
Definition: Types.hpp:49
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed o...
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
uint32_t GetNumDimensions() const
Get the number of dimensions.
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:70
arm_compute::NormalizationLayerInfo CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo &tensorInfo, armnn::DataLayout dataLayout)
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
NormalizationAlgorithmChannel
Definition: Types.hpp:133
arm_compute::NormType ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
uint32_t GetNumViews() const
Get the number of views.
Copyright (c) 2020 ARM Limited.
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
PoolingAlgorithm
Definition: Types.hpp:96
unsigned int ComputePositiveAxis(const int &axis, const armnn::TensorInfo &tensor)
Function to convert axis to its positive equivalent value.
arm_compute::ActivationLayerInfo::ActivationFunction ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
ComparisonOperation
Definition: Types.hpp:77
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor &fullyConnectedDesc)
arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding rounding)
A FullyConnectedDescriptor for the FullyConnectedLayer.
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
min(a, max(b, input)) ReLu1 & ReLu6.
ComparisonOperation m_Operation
Specifies the comparison operation to execute.
Definition: Descriptors.hpp:86
OutputShapeRounding
Definition: Types.hpp:147
const uint32_t * GetViewSizes(uint32_t idx) const
Get the view sizes at the int value idx.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:45
ResizeMethod
Definition: Types.hpp:103
arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor &descriptor)
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191
T ComputeSoftmaxAclAxis(const SoftmaxDescriptor &softmaxDesc, const armnn::TensorInfo &tensor)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:47
A SoftmaxDescriptor for the SoftmaxLayer.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:43
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor &actDesc)
ActivationFunction
Definition: Types.hpp:55