ArmNN 20.02
ArmComputeUtils.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <arm_compute/core/Types.h>

#include <boost/assert.hpp>

namespace armnn
{

inline arm_compute::NormalizationLayerInfo
CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
    const unsigned int depth = tensorInfo.GetShape()[depthDimension];

    // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version
    // of L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
    // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully choosing the normalization
    // parameters.
    //
    // Please refer to both the reference implementation of the normalization layer and the implementation of
    // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.

    // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
    // CL: This does not result in extra kernel threads not doing any work: see the usage of the RADIUS parameter in
    // ACL's normalization_layer_cross_map() CL function.
    const uint32_t normSize = depth * 2u + 1u;

    // See ACL's NormalizationLayerInfo::scale_coeff() definition.
    // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
    const float alpha = 1.0f;

    // Don't offset the reduction.
    const float kappa = 0.0f;

    // pow(reduction, -0.5) = 1 / sqrt(reduction)
    const float beta = 0.5f;

    return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
}
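
// Illustrative sketch, not part of the original header: with is_scaled == false, ACL's
// NormalizationLayerInfo::scale_coeff() is simply alpha, so the layer configured above computes
//   out = in / (kappa + alpha * sum(in_i^2))^beta = in / sqrt(sum(in_i^2)),
// with the sum running over the whole depth axis (radius == depth). The hypothetical helper
// below is a reference version of that computation; it assumes <cmath> is available and, like
// kappa == 0 above, ignores the all-zero input vector case.
inline void ReferenceDepthwiseL2Normalization(const float* input, float* output, unsigned int depth)
{
    float sumOfSquares = 0.0f;
    for (unsigned int c = 0; c < depth; ++c)
    {
        sumOfSquares += input[c] * input[c]; // reduction along the depth axis
    }
    const float scale = 1.0f / std::sqrt(sumOfSquares); // kappa == 0, beta == 0.5
    for (unsigned int c = 0; c < depth; ++c)
    {
        output[c] = input[c] * scale;
    }
}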

inline arm_compute::ActivationLayerInfo::ActivationFunction
ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
{
    using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

    switch (armnnFunction)
    {
        case ActivationFunction::Linear:      return AclActivationFunction::LINEAR;
        // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
        case ActivationFunction::Sigmoid:     return AclActivationFunction::LOGISTIC;
        case ActivationFunction::ReLu:        return AclActivationFunction::RELU;
        case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
        case ActivationFunction::SoftReLu:    return AclActivationFunction::SOFT_RELU;
        case ActivationFunction::LeakyReLu:   return AclActivationFunction::LEAKY_RELU;
        case ActivationFunction::Abs:         return AclActivationFunction::ABS;
        case ActivationFunction::Sqrt:        return AclActivationFunction::SQRT;
        case ActivationFunction::Square:      return AclActivationFunction::SQUARE;
        case ActivationFunction::TanH:        return AclActivationFunction::TANH;
        case ActivationFunction::Elu:         return AclActivationFunction::ELU;
        default:                              throw InvalidArgumentException("Unsupported activation function");
    }
}

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
{
    return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
                                            actDesc.m_A, actDesc.m_B);
}
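
// Usage sketch: the helper below is hypothetical and not part of the original header. ACL's
// LU_BOUNDED_RELU computes min(a, max(b, input)), so a ReLu6-style activation can be described
// with BoundedReLu and m_A = 6, m_B = 0 before converting.
inline arm_compute::ActivationLayerInfo MakeRelu6ActivationLayerInfoExample()
{
    ActivationDescriptor desc;
    desc.m_Function = ActivationFunction::BoundedReLu; // maps to LU_BOUNDED_RELU
    desc.m_A = 6.0f;                                   // upper bound
    desc.m_B = 0.0f;                                   // lower bound
    return ConvertActivationDescriptorToAclActivationLayerInfo(desc);
}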

inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
{
    using arm_compute::PoolingType;

    switch (poolingAlgorithm)
    {
        case PoolingAlgorithm::Max:     return PoolingType::MAX;
        case PoolingAlgorithm::Average: return PoolingType::AVG;
        case PoolingAlgorithm::L2:      return PoolingType::L2;
        default:                        throw InvalidArgumentException("Unsupported pooling algorithm");
    }
}

inline arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding
                                                                                               rounding)
{
    using arm_compute::DimensionRoundingType;

    switch (rounding)
    {
        case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
        case OutputShapeRounding::Floor:   return DimensionRoundingType::FLOOR;
        default: throw InvalidArgumentException("Unsupported Output Shape Rounding type");
    }
}

inline arm_compute::NormType
ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
{
    using arm_compute::NormType;
    switch (channelType)
    {
        case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
        case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
        default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
    }
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    return fc_info;
}
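
// Usage sketch: the helper below is hypothetical and not part of the original header. Setting
// m_TransposeWeightMatrix carries through to ACL's transpose_weights flag, which asks ACL to
// transpose the weight tensor when the fully connected layer is configured.
inline arm_compute::FullyConnectedLayerInfo MakeTransposedFullyConnectedInfoExample()
{
    FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true; // ACL transposes the weights on configure
    return ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(desc);
}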

inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
{
    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
            return arm_compute::InterpolationPolicy::BILINEAR;
        case ResizeMethod::NearestNeighbor:
            return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
        default:
            throw InvalidArgumentException("Unsupported resize method");
    }
}

inline unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
{
    // Detect the Android default value of -1 and return the ACL default value of 1.
    if (softmaxDesc.m_Axis == -1)
    {
        return 1;
    }

    unsigned int dim = tensor.GetNumDimensions();

    BOOST_ASSERT(dim != 0);

    // Currently ArmNN supports axis 1.
    return dim - 1;
}
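
// Illustrative sketch: the helper below is hypothetical and not part of the original header.
// The Android default axis of -1 maps to ACL's default axis of 1; any other axis value handled
// here resolves to dim - 1.
inline unsigned int SoftmaxAclAxisExample()
{
    SoftmaxDescriptor desc;
    desc.m_Axis = -1; // Android default
    const TensorInfo info(TensorShape({1, 2, 3, 4}), DataType::Float32);
    return ComputeSoftmaxAclAxis(desc, info); // returns 1
}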

inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
{
    unsigned int numSplit = desc.GetNumViews();
    unsigned int numDimensions = desc.GetNumDimensions();
    std::set<unsigned int> splitAxis;

    for (unsigned int i = 0; i < numSplit; ++i)
    {
        for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
        {
            // Any dimension where a view is smaller than the input is an axis the input is split along.
            if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
            {
                splitAxis.insert(dimIdx);
            }
        }
    }
    return splitAxis;
}
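
// Illustrative sketch: the helper below is hypothetical and not part of the original header.
// Two views of size {2, 2} over a {4, 2} input differ from the input shape only in dimension 0,
// so the computed split axis set is {0}.
inline std::set<unsigned int> SplitAxisExample()
{
    SplitterDescriptor desc(/*numViews=*/2, /*numDimensions=*/2);
    for (uint32_t view = 0; view < 2; ++view)
    {
        desc.SetViewSize(view, 0, 2); // half of the input along dimension 0
        desc.SetViewSize(view, 1, 2); // full input size along dimension 1
    }
    return ComputeSplitAxis(desc, TensorShape({4, 2}));
}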

} // namespace armnn