13 #include <arm_compute/runtime/FunctionDescriptors.h>
14 #include <arm_compute/function_info/FullyConnectedLayerInfo.h>
16 #if defined(ARMCOMPUTENEON_ENABLED)
20 #if defined(ARMCOMPUTECL_ENABLED)
// NOTE(review): fragmentary listing — the function signature and braces are elided;
// only the body statements below are visible. Builds an
// arm_compute::NormalizationLayerInfo whose CROSS_MAP window spans the whole depth
// axis; with alpha=1, kappa=0, beta=0.5 the normalisation denominator becomes
// sqrt(sum(x^2)), i.e. an L2 normalisation across channels — TODO confirm against
// the elided signature and its callers.
27 inline arm_compute::NormalizationLayerInfo
// Number of elements along the depth/channel dimension of the input tensor
// (depthDimension comes from the elided part of the signature — confirm).
32 const unsigned int depth = tensorInfo.
GetShape()[depthDimension];
// 2*depth + 1 guarantees the normalisation window covers the entire depth axis
// regardless of the element's position within that axis.
45 const uint32_t normSize = depth * 2u + 1u;
// alpha: scale applied to the sum of squares (1.0f => plain sum).
49 const float alpha = 1.0f;
// kappa: additive offset inside the denominator (0.0f => none).
52 const float kappa = 0.0f;
// beta: exponent of the denominator; 0.5f turns it into a square root.
55 const float beta = 0.5f;
57 return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa,
false);
// NOTE(review): truncated — only the dispatch header of an armnn→ACL
// activation-function conversion is visible; its case labels are elided.
65 switch (armnnFunction)
// Overloaded helpers returning an arm_compute::ActivationLayerInfo; the
// signatures are elided in this listing.
84 inline arm_compute::ActivationLayerInfo
91 inline arm_compute::ActivationLayerInfo
// Pointer overload: convert only when a descriptor was actually supplied …
94 if (activationDescPtr !=
nullptr)
// … otherwise fall back to a default-constructed (disabled) ActivationLayerInfo.
99 return arm_compute::ActivationLayerInfo();
102 inline arm_compute::ActivationLayerInfo
// Second pointer-taking overload with the same null-guarded pattern: delegate
// to the reference overload when a descriptor is present …
107 if (activationDescPtr !=
nullptr)
110 *activationDescPtr));
// … and return the disabled default otherwise.
112 return arm_compute::ActivationLayerInfo();
// NOTE(review): maps an armnn activation function enum value to the matching
// arm_compute::ActivationLayerInfo. The case labels are elided in this listing;
// only the return arms are visible, annotated below.
115 inline arm_compute::ActivationLayerInfo
119 switch (activationFunction)
// Default-constructed info: activation disabled / identity.
122 return arm_compute::ActivationLayerInfo();
// Plain rectified linear unit.
124 return arm_compute::ActivationLayerInfo(arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
// BOUNDED_RELU with an upper bound of 6.0 — presumably the ReLU6 mapping; confirm
// against the elided case label.
126 return arm_compute::ActivationLayerInfo(
127 arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
// TANH with scale parameters a = 1.0, b = 1.0 (plain tanh(x)).
129 return arm_compute::ActivationLayerInfo(
130 arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
// LOGISTIC, i.e. the sigmoid activation.
132 return arm_compute::ActivationLayerInfo(
133 arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
// NOTE(review): fragments of three small enum-conversion helpers; the switch
// bodies and signatures are elided in this listing.
// armnn pooling algorithm → arm_compute::PoolingType.
155 using arm_compute::PoolingType;
157 switch (poolingAlgorithm)
// armnn output-shape rounding → arm_compute::DimensionRoundingType.
169 using arm_compute::DimensionRoundingType;
// armnn normalization channel type → arm_compute::NormType.
179 inline arm_compute::NormType
182 using arm_compute::NormType;
// NOTE(review): two overloads building an arm_compute::FullyConnectedLayerInfo
// from an armnn descriptor; most field assignments are elided in this listing.
191 inline arm_compute::FullyConnectedLayerInfo
195 arm_compute::FullyConnectedLayerInfo fc_info;
// Overload additionally taking a fused activation to attach to the FC layer.
201 inline arm_compute::FullyConnectedLayerInfo
203 arm_compute::ActivationLayerInfo activationLayerInfo)
205 arm_compute::FullyConnectedLayerInfo fc_info;
// Forward the caller-supplied fused activation into the ACL descriptor.
207 fc_info.activation_info = activationLayerInfo;
// NOTE(review): maps an armnn resize method to an ACL interpolation policy;
// the case labels are elided in this listing.
213 switch (resizeMethod)
216 return arm_compute::InterpolationPolicy::BILINEAR;
218 return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
// NOTE(review): computes the ACL axis for a softmax from the armnn descriptor.
// Guard below appears to special-case the m_Axis == -1 default (the elided branch
// presumably returns the ACL default axis — confirm).
228 if (softmaxDesc.
m_Axis == -1)
// Convert armnn dimension ordering to ACL's reversed ordering: start from the
// last dimension index (dim - 1) …
238 auto aclAxis = (
static_cast<T
>(dim) - 1);
// … then step one dimension inward unless already at axis 0. Rationale for the
// extra decrement is not visible in this listing — verify against callers.
239 aclAxis = aclAxis > 0 ? aclAxis -1 : aclAxis;
// NOTE(review): collects the set of dimensions along which a splitter divides
// its input. The comparison that decides when a dimension is a split axis is
// elided in this listing; only the loop scaffolding is visible.
248 std::set<unsigned int> splitAxis;
// For every output view of the split …
250 for (
unsigned int i = 0; i < numSplit; ++i)
// … scan each dimension of the input shape …
252 for (
unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
// … and record dimensions where the view differs from the input (condition elided).
256 splitAxis.insert(dimIdx);
// NOTE(review): converts an armnn axis index to ACL's reversed dimension order.
// `sign` preserves the negative-axis convention: for armnnAxis >= 0 this yields
// rank - 1 - armnnAxis (mirror the axis), for armnnAxis < 0 it yields
// -rank - 1 - armnnAxis (mirrored negative index).
272 int sign = (armnnAxis < 0) ? -1 : 1;
273 int aclAxis = sign * rank - 1 - armnnAxis;
// NOTE(review): wraps a possibly-negative (Python-style) axis into [0, rank).
// No range validation is visible here — out-of-range input would produce a
// negative value cast to a large unsigned number; presumably validated upstream.
288 int positiveAxis = (axis < 0) ? rank + axis : axis;
289 return static_cast<unsigned int>(positiveAxis);
// NOTE(review): tail of a Conv3dInfo builder — the stride/padding/dilation
// construction preceding these lines is elided in this listing.
294 bool isFastMathEnabled,
// Fused activation converted from the armnn descriptor (initialiser elided).
303 const arm_compute::ActivationLayerInfo activationInfo =
// Output dimensions are rounded down, matching armnn's convolution shape rules.
305 const auto roundType = arm_compute::DimensionRoundingType::FLOOR;
307 return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
// NOTE(review): second Conv3dInfo builder overload (signature head elided).
// Translates the armnn 3D convolution descriptor fields into ACL's
// stride / padding / dilation aggregates, then packs them into a Conv3dInfo.
311 bool isFastMathEnabled)
// Per-axis strides (X, Y, Z).
314 const arm_compute::Size3D stride{descriptor.
m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
// Padding in ACL's left/right, top/bottom, front/back order.
315 const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
316 descriptor.m_PadTop, descriptor.m_PadBottom,
317 descriptor.m_PadFront, descriptor.m_PadBack};
// Per-axis dilation factors.
318 const arm_compute::Size3D dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};
// Fused activation converted from the armnn descriptor (initialiser elided).
320 const arm_compute::ActivationLayerInfo activationInfo =
// Output dimensions are rounded down, matching armnn's convolution shape rules.
322 const auto roundType = arm_compute::DimensionRoundingType::FLOOR;
324 return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
// NOTE(review): computes the TensorInfo a reduce operation produces when the
// listed axes are collapsed. Several branches (including the keepDims handling
// and the error paths) are elided in this listing; comments below cover only
// what is visible.
353 const std::vector<uint32_t>& vAxis,
// Work on a copy so the caller's TensorInfo is never mutated.
356 auto reducedTensorInfo = input;
358 unsigned int outputRank = 0;
// Empty axis list: the elided branch presumably reduces over all dimensions.
364 else if (vAxis.empty())
// More axes than dimensions is invalid — the elided branch presumably throws.
368 else if (vAxis.size() > reducedTensorInfo.GetNumDimensions())
// Normal case: each reduced axis removes one dimension from the output rank.
374 outputRank = reducedTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(vAxis.size());
// Start every output dimension at 1; surviving axes are filled in below.
380 std::vector<unsigned int> dimSizes(outputRank, 1);
384 unsigned int outputIndex = 0;
385 for (
unsigned int i = 0; i < reducedTensorInfo.GetNumDimensions(); ++i)
// Dimensions NOT named in vAxis keep their original extent …
387 if (std::find(vAxis.begin(), vAxis.end(), i) == vAxis.end())
389 dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(reducedTensorInfo.GetShape()[i]);
// … while reduced dimensions collapse to size 1 (keepDims path — condition elided).
394 dimSizes[outputIndex] = 1;
400 reducedTensorInfo.SetShape(inferredShape);
401 return reducedTensorInfo;
// NOTE(review): helper macro that validates a multi-axis reduce by decomposing it
// into a chain of single-axis reduces and re-invoking `func` (a workload validate
// function) on each intermediate tensor shape, accumulating the result in `status`.
// Part of the loop body is elided in this listing, so the macro text is left
// byte-identical; comments cannot be added inside a `\`-continued macro without
// breaking the continuation.
// NOTE(review): `recalulatedAxis` is misspelled (should be `recalculatedAxis`);
// it is macro-local, so rename it once the full macro body is in view.
405 #define IS_MULTI_AXES_REDUCE_SUPPORTED(func, input, desc, status) \
406 armnn::TensorInfo inputTensorInfo = input; \
407 unsigned int recalulatedAxis = 0; \
408 std::vector<uint32_t> axes; \
410 for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i) \
412 axes.emplace_back(desc.m_vAxis[i]); \
414 const armnn::TensorInfo& reducedTensorInfo = \
415 ComputeReductionTensorShape(input, axes, desc.m_KeepDims); \
417 std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalulatedAxis); \
419 armnn::ReduceDescriptor newReduceDescriptor = desc; \
420 newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end()); \
422 status = func(inputTensorInfo, reducedTensorInfo, newReduceDescriptor); \
428 if (!desc.m_KeepDims) \
433 inputTensorInfo = reducedTensorInfo; \