ArmNN
 23.02
ArmComputeUtils.hpp File Reference
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <arm_compute/core/Types.h>
#include <arm_compute/runtime/FunctionDescriptors.h>
#include "neon/workloads/NeonReduceWorkload.hpp"
#include "cl/workloads/ClReduceWorkload.hpp"

Go to the source code of this file.

Namespaces

 armnn
 Copyright (c) 2021 ARM Limited and Contributors.
 

Macros

#define IS_MULTI_AXES_REDUCE_SUPPORTED(func, input, desc, status)
 Macro function to check whether a layer with multiple reduction axes is supported on each backend. More...
 

Functions

arm_compute::NormalizationLayerInfo CreateAclNormalizationLayerInfoForL2Normalization (const armnn::TensorInfo &tensorInfo, armnn::DataLayout dataLayout)
 
arm_compute::ActivationLayerInfo::ActivationFunction ConvertActivationFunctionToAclActivationFunction (ActivationFunction armnnFunction)
 
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo (const ActivationDescriptor &actDesc)
 
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo (const ActivationDescriptor *activationDescPtr)
 
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo (const QueueDescriptor &queueDescriptor)
 
arm_compute::ActivationLayerInfo ConvertLstmActivationFuncToAclLayerInfo (uint32_t activationFunction)
 
arm_compute::ComparisonOperation ConvertComparisonOperationToAcl (const ComparisonDescriptor &descriptor)
 
arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType (PoolingAlgorithm poolingAlgorithm)
 
arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType (OutputShapeRounding rounding)
 
arm_compute::NormType ConvertNormalizationAlgorithmChannelToAclNormType (NormalizationAlgorithmChannel channelType)
 
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo (const FullyConnectedDescriptor &fullyConnectedDesc, const ActivationDescriptor *activationDesc)
 
arm_compute::FullyConnectedLayerInfo ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo (const FullyConnectedDescriptor &fullyConnectedDesc, arm_compute::ActivationLayerInfo activationLayerInfo)
 
arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy (ResizeMethod resizeMethod)
 
template<typename T >
T ComputeSoftmaxAclAxis (const SoftmaxDescriptor &softmaxDesc, const armnn::TensorInfo &tensor)
 
std::set< unsigned int > ComputeSplitAxis (const armnn::SplitterDescriptor &desc, const TensorShape &input)
 
int ComputeAclAxis (const int &armnnAxis, const armnn::TensorInfo &tensor)
 Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank) More...
 
unsigned int ComputePositiveAxis (const int &axis, const armnn::TensorInfo &tensor)
 Function to convert axis to its positive equivalent value. More...
 
arm_compute::Conv3dInfo ComputeConv3DInfo (const armnn::Convolution3dDescriptor descriptor, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
 Utility function used to setup an arm_compute::Conv3dInfo object from convolution3d descriptor. More...
 
arm_compute::Conv3dInfo ComputeConv3DInfo (const armnn::Convolution3dQueueDescriptor queueDescriptor, bool isFastMathEnabled)
 
arm_compute::PaddingMode ConvertPaddingModeToAcl (const PaddingMode &paddingMode)
 
arm_compute::ReductionOperation ConvertReductionOperationToAcl (const ReduceDescriptor &descriptor)
 
const TensorInfo ComputeReductionTensorShape (const armnn::TensorInfo &input, const std::vector< uint32_t > &vAxis, const bool keepDims)
 Function to compute the output tensor shape based on the axes and if keepDims is set. More...
 

Macro Definition Documentation

◆ IS_MULTI_AXES_REDUCE_SUPPORTED

#define IS_MULTI_AXES_REDUCE_SUPPORTED (   func,
  input,
  desc,
  status 
)
Value:
armnn::TensorInfo inputTensorInfo = input; \
unsigned int recalculatedAxis = 0; \
std::vector<uint32_t> axes; \
\
for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i) \
{ \
axes.emplace_back(desc.m_vAxis[i]); \
\
const armnn::TensorInfo& reducedTensorInfo = \
ComputeReductionTensorShape(input, axes, desc.m_KeepDims); \
\
std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalculatedAxis); \
\
armnn::ReduceDescriptor newReduceDescriptor = desc; \
newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end()); \
\
status = func(inputTensorInfo, reducedTensorInfo, newReduceDescriptor); \
if (!status) \
{ \
break; \
} \
\
if (!desc.m_KeepDims) \
{ \
recalculatedAxis++; \
} \
\
inputTensorInfo = reducedTensorInfo; \
}

Macro function to check whether a layer with multiple reduction axes is supported on each backend.

Definition at line 405 of file ArmComputeUtils.hpp.

armnn::TensorInfo
Definition: Tensor.hpp:152