ArmNN 22.11
NeonLayerSupport Class Reference

#include <NeonLayerSupport.hpp>

Inheritance diagram for NeonLayerSupport:
NeonLayerSupport → LayerSupportBase → ILayerSupport

Public Member Functions

 NeonLayerSupport (const IBackendInternal::IBackendSpecificModelContextPtr &modelContextPtr)
 
 NeonLayerSupport ()
 
 ~NeonLayerSupport ()
 
bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &quantizedLstmParamsInfo, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reason=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from ILayerSupport
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsActivationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsAdditionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsArgMinMaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchToSpaceNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsCastSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsChannelShuffleSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsComparisonSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertBf16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp32ToBf16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp32ToFp16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDebugSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthToSpaceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDequantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDivisionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsElementwiseUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFakeQuantizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFillSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFloorSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFullyConnectedSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsGatherSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInputSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInstanceNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsL2NormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalBinarySupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMaximumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMeanSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemCopySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemImportSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMergeSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMinimumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMultiplicationSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsOutputSupported(const TensorInfo &output
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPadSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPermuteSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreCompiledSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreluSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizedLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsRankSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReduceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReshapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsResizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsShapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSliceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToBatchNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToDepthSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSplitterSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSubtractionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSwitchSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input
 

Additional Inherited Members

- Public Attributes inherited from ILayerSupport
const TensorInfo & output
 
const TensorInfo const ActivationDescriptor & descriptor
 
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo & input1
 
const TensorInfo const TensorInfo & output
 
const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ArgMinMaxDescriptor & descriptor
 
const TensorInfo const ArgMinMaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo & mean
 
const TensorInfo const TensorInfo const TensorInfo & var
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor & descriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const BatchToSpaceNdDescriptor & descriptor
 
const TensorInfo const BatchToSpaceNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ChannelShuffleDescriptor & descriptor
 
const TensorInfo const ChannelShuffleDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const ComparisonDescriptor & descriptor
 
const TensorInfo const TensorInfo const ComparisonDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConcatSupported(const std Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution2dDescriptor & descriptor
 
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution3dDescriptor & descriptor
 
const TensorInfo const Convolution3dDescriptor const TensorInfo & weights
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthToSpaceDescriptor & descriptor
 
const TensorInfo const DepthToSpaceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthwiseConvolution2dDescriptor & descriptor
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo & weights
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo & scores
 
const TensorInfo const TensorInfo & anchors
 
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptor & descriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ElementwiseUnaryDescriptor & descriptor
 
const TensorInfo const ElementwiseUnaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const FakeQuantizationDescriptor & descriptor
 
const FakeQuantizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const FillDescriptor & descriptor
 
const TensorInfo const FillDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo & weights
 
const TensorInfo const TensorInfo const TensorInfo & biases
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptor & descriptor
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const GatherDescriptor & descriptor
 
const TensorInfo const TensorInfo const GatherDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const InstanceNormalizationDescriptor & descriptor
 
const TensorInfo const InstanceNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const L2NormalizationDescriptor & descriptor
 
const TensorInfo const L2NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptor & descriptor
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const LogSoftmaxDescriptor & descriptor
 
const TensorInfo const LogSoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo & outputStateIn
 
const TensorInfo const TensorInfo & cellStateIn
 
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & output
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor & descriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const MeanDescriptor & descriptor
 
const TensorInfo const MeanDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo & ouput
 
const TensorInfo const NormalizationDescriptor & descriptor
 
const TensorInfo const NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PadDescriptor & descriptor
 
const TensorInfo const PadDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PermuteDescriptor & descriptor
 
const TensorInfo const PermuteDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling2dDescriptor & descriptor
 
const TensorInfo const Pooling2dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling3dDescriptor & descriptor
 
const TensorInfo const Pooling3dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const PreCompiledDescriptor & descriptor
 
const PreCompiledDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo & alpha
 
const TensorInfo & previousOutputIn
 
const TensorInfo const TensorInfo & previousCellStateIn
 
const TensorInfo const TensorInfo const TensorInfo & outputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & output
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor & descriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfo & paramsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo & previousCellStateIn
 
const TensorInfo const TensorInfo & previousOutputIn
 
const TensorInfo const TensorInfo const TensorInfo & cellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & output
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfo & paramsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReduceDescriptor & descriptor
 
const TensorInfo const ReduceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReshapeDescriptor & descriptor
 
const TensorInfo const ReshapeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ResizeDescriptor & descriptor
 
const TensorInfo const ResizeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SliceDescriptor & descriptor
 
const TensorInfo const SliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SoftmaxDescriptor & descriptor
 
const TensorInfo const SoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToBatchNdDescriptor & descriptor
 
const TensorInfo const SpaceToBatchNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToDepthDescriptor & descriptor
 
const TensorInfo const SpaceToDepthDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptor & descriptor
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo & output
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptor & descriptor
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo & output0
 
const TensorInfo const TensorInfo const TensorInfo & output1
 
const TensorInfo const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeConvolution2dDescriptor & descriptor
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo & weights
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeDescriptor & descriptor
 
const TensorInfo const TransposeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor & descriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 14 of file NeonLayerSupport.hpp.

Constructor & Destructor Documentation

◆ NeonLayerSupport() [1/2]

Definition at line 144 of file NeonLayerSupport.cpp.

145  : m_ModelContextPtr(modelContextPtr)
146 {
147 }

◆ NeonLayerSupport() [2/2]

Definition at line 149 of file NeonLayerSupport.cpp.

150  : m_ModelContextPtr(nullptr)
151 {
152 }

◆ ~NeonLayerSupport()

~NeonLayerSupport ( )
inline

Definition at line 20 of file NeonLayerSupport.hpp.

References ILayerSupport::alpha, ILayerSupport::beta, ILayerSupport::biases, ILayerSupport::cellStateIn, ILayerSupport::cellStateOut, ILayerSupport::descriptor, ILayerSupport::gamma, ILayerSupport::input1, NeonLayerSupport::IsActivationSupported(), NeonLayerSupport::IsAdditionSupported(), NeonLayerSupport::IsArgMinMaxSupported(), NeonLayerSupport::IsBatchMatMulSupported(), NeonLayerSupport::IsBatchNormalizationSupported(), NeonLayerSupport::IsBatchToSpaceNdSupported(), NeonLayerSupport::IsCastSupported(), NeonLayerSupport::IsChannelShuffleSupported(), NeonLayerSupport::IsComparisonSupported(), NeonLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConstantSupported(), NeonLayerSupport::IsConvertBf16ToFp32Supported(), NeonLayerSupport::IsConvertFp16ToFp32Supported(), NeonLayerSupport::IsConvertFp32ToBf16Supported(), NeonLayerSupport::IsConvertFp32ToFp16Supported(), NeonLayerSupport::IsConvolution2dSupported(), NeonLayerSupport::IsConvolution3dSupported(), NeonLayerSupport::IsDepthToSpaceSupported(), NeonLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsDequantizeSupported(), NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(), NeonLayerSupport::IsDivisionSupported(), NeonLayerSupport::IsElementwiseUnarySupported(), NeonLayerSupport::IsFillSupported(), NeonLayerSupport::IsFloorSupported(), NeonLayerSupport::IsFullyConnectedSupported(), NeonLayerSupport::IsGatherNdSupported(), NeonLayerSupport::IsGatherSupported(), NeonLayerSupport::IsInputSupported(), NeonLayerSupport::IsInstanceNormalizationSupported(), NeonLayerSupport::IsL2NormalizationSupported(), NeonLayerSupport::IsLayerSupported(), NeonLayerSupport::IsLogicalBinarySupported(), NeonLayerSupport::IsLogSoftmaxSupported(), NeonLayerSupport::IsLstmSupported(), NeonLayerSupport::IsMaximumSupported(), NeonLayerSupport::IsMeanSupported(), NeonLayerSupport::IsMinimumSupported(), NeonLayerSupport::IsMultiplicationSupported(), NeonLayerSupport::IsNormalizationSupported(), 
NeonLayerSupport::IsOutputSupported(), NeonLayerSupport::IsPadSupported(), NeonLayerSupport::IsPermuteSupported(), NeonLayerSupport::IsPooling2dSupported(), NeonLayerSupport::IsPooling3dSupported(), NeonLayerSupport::IsPreluSupported(), NeonLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQuantizedLstmSupported(), NeonLayerSupport::IsQuantizeSupported(), NeonLayerSupport::IsReduceSupported(), NeonLayerSupport::IsReshapeSupported(), NeonLayerSupport::IsResizeSupported(), NeonLayerSupport::IsSliceSupported(), NeonLayerSupport::IsSoftmaxSupported(), NeonLayerSupport::IsSpaceToBatchNdSupported(), NeonLayerSupport::IsSpaceToDepthSupported(), NeonLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsStackSupported(), NeonLayerSupport::IsStridedSliceSupported(), NeonLayerSupport::IsSubtractionSupported(), NeonLayerSupport::IsTransposeConvolution2dSupported(), NeonLayerSupport::IsTransposeSupported(), NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(), ILayerSupport::mean, ILayerSupport::output, ILayerSupport::outputs, ILayerSupport::outputStateIn, ILayerSupport::outputStateOut, ILayerSupport::paramsInfo, ILayerSupport::previousCellStateIn, ILayerSupport::previousOutputIn, ILayerSupport::reasonIfUnsupported, ILayerSupport::scratchBuffer, ILayerSupport::var, and ILayerSupport::weights.

20 {}

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo & input,
const TensorInfo & output,
const ActivationDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 599 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonActivationWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

603 {
607  input,
608  output,
609  descriptor);
610 }
const TensorInfo & output
arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor)
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 612 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonAdditionWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

616 {
619  input0,
620  input1,
621  output,
622  nullptr);
623 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const ArgMinMaxDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 625 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonArgMinMaxWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

629 {
632  input,
633  output,
634  descriptor);
635 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonArgMinMaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo & inputX,
const TensorInfo & inputY,
const TensorInfo & output,
const BatchMatMulDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 637 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchMatMulValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

642 {
645  inputX,
646  inputY,
647  output,
648  descriptor);
649 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonBatchMatMulValidate(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor)
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & mean,
const TensorInfo & var,
const TensorInfo & beta,
const TensorInfo & gamma,
const BatchNormalizationDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 651 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchNormalizationValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

659 {
662  input,
663  output,
664  mean,
665  var,
666  beta,
667  gamma,
668  descriptor,
669  nullptr);
670 }
const TensorInfo & output
arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo const TensorInfo & var
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const TensorInfo & mean

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const BatchToSpaceNdDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 672 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonBatchToSpaceNdWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

676 {
679  input,
680  output,
681  descriptor);
682 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonBatchToSpaceNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 684 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonCastValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

687 {
690  input,
691  output);
692 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonCastValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo & input,
const TensorInfo & output,
const ChannelShuffleDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 694 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonChannelShuffleValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

698 {
701  input,
702  output,
703  descriptor);
704 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonChannelShuffleValidate(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const ComparisonDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 706 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonComparisonWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

711 {
712 
715  input0,
716  input1,
717  output,
718  descriptor);
719 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonComparisonWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor)
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo *> & inputs,
const TensorInfo & output,
const OriginsDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 721 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, OriginsDescriptor::GetConcatAxis(), OriginsDescriptor::GetNumDimensions(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonConcatWorkloadValidate(), and armnn::SetValueChecked().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

725 {
726  if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
727  {
728  SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
729  return false;
730  }
731 
732  unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
733  if(concatInnerAxis < 3) // Width, height, or channels
734  {
737  inputs,
738  output,
739  descriptor);
740  }
741  else if (concatInnerAxis == 3)
742  {
743  for (auto& input : inputs)
744  {
745  if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
746  {
747  SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
748  return false;
749  }
750  }
751  return true; // Sub-tensors support concat along batch
752  }
753  else // > 4 dimensions not supported.
754  {
755  SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
756  return false;
757  }
758 }
bool IsTypeSpaceMatch(const TensorInfo &other) const
Check that the types are the same and, if quantized, that the quantization parameters are the same.
Definition: Tensor.cpp:432
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonConcatWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const OriginsDescriptor &descriptor)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 760 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonConstantWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

762 {
765  output);
766 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 768 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

771 {
772  armnn::IgnoreUnused(input);
775  return true;
776 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 778 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

781 {
782  armnn::IgnoreUnused(input);
785  return true;
786 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 788 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

791 {
792  armnn::IgnoreUnused(input);
795  return true;
796 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 798 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

801 {
802  armnn::IgnoreUnused(input);
805  return true;
806 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 808 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution2dWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

814 {
815  bool isFastMathEnabled = false;
816 #if defined(ARMCOMPUTENEON_ENABLED)
817  if (m_ModelContextPtr)
818  {
819  if (m_ModelContextPtr.get() != nullptr)
820  {
821  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
822  if (modelOptions)
823  {
824  isFastMathEnabled = modelOptions->IsFastMathEnabled();
825  }
826  }
827  }
828 #endif
829 
832  input,
833  output,
834  descriptor,
835  weights,
836  biases,
837  isFastMathEnabled,
838  nullptr);
839 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution3dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 841 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, NeonBackendModelContext::IsFastMathEnabled(), and armnn::NeonConvolution3dWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

847 {
848  bool isFastMathEnabled = false;
849 #if defined(ARMCOMPUTENEON_ENABLED)
850  if (m_ModelContextPtr)
851  {
852  if (m_ModelContextPtr.get() != nullptr)
853  {
854  auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
855  if (modelOptions)
856  {
857  isFastMathEnabled = modelOptions->IsFastMathEnabled();
858  }
859  }
860  }
861 #endif
862 
865  input,
866  output,
867  descriptor,
868  weights,
869  biases,
870  isFastMathEnabled,
871  nullptr);
872 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonConvolution3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, bool isFastMathEnabled, const ActivationDescriptor *activationDescriptor)
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthToSpaceDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 874 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthToSpaceWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

878 {
881  input,
882  output,
883  descriptor);
884 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonDepthToSpaceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor)

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 886 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

892 {
895  input,
896  output,
897  descriptor,
898  weights,
899  biases,
900  nullptr);
901 }
const TensorInfo & output
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 903 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDequantizeWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

906 {
909  input,
910  output);
911 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reason = EmptyOptional() 
) const
override

Definition at line 913 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDepthwiseConvolutionWorkloadValidate().

Referenced by NeonLayerSupport::~NeonLayerSupport().

919 {
922  input,
923  output,
924  descriptor,
925  weights,
926  biases,
927  nullptr);
928 }
const TensorInfo & output
arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, const ActivationDescriptor *activationDescriptor)
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1184 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonDivisionWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1188 {
1191  input0,
1192  input1,
1193  output,
1194  nullptr);
1195 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo & input,
const TensorInfo & output,
const ElementwiseUnaryDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 930 of file NeonLayerSupport.cpp.

References armnn::Abs, armnn::Exp, FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::Log, armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::Neg, armnn::NeonAbsWorkloadValidate(), armnn::NeonExpWorkloadValidate(), armnn::NeonLogicalNotWorkloadValidate(), armnn::NeonLogWorkloadValidate(), armnn::NeonNegWorkloadValidate(), armnn::NeonRsqrtWorkloadValidate(), armnn::NeonSinWorkloadValidate(), armnn::NeonSqrtWorkloadValidate(), armnn::Rsqrt, armnn::Sin, and armnn::Sqrt.

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

934 {
935  switch(descriptor.m_Operation)
936  {
937  case UnaryOperation::Abs:
940  input,
941  output);
942  case UnaryOperation::Exp:
945  input,
946  output);
950  input,
951  output);
952  case UnaryOperation::Log:
955  input,
956  output);
957  case UnaryOperation::Neg:
960  input,
961  output);
965  input,
966  output);
967  case UnaryOperation::Sin:
970  input,
971  output);
975  input,
976  output);
977  default:
978  return false;
979  }
980 }
arm_compute::Status NeonNegWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonLogWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
arm_compute::Status NeonSqrtWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonSinWorkloadValidate(const TensorInfo &input, const TensorInfo &output)

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo & input,
const TensorInfo & output,
const FillDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 982 of file NeonLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

986 {
987  armnn::IgnoreUnused(input);
990 
991  return IsNeonBackendSupported(reasonIfUnsupported);
992 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 994 of file NeonLayerSupport.cpp.

References TensorInfo::GetDataType(), armnn::IgnoreUnused(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

997 {
999  return IsNeonBackendSupported(reasonIfUnsupported) &&
1001  input.GetDataType(),
1002  &FalseFuncF16<>,
1003  &TrueFunc<>,
1004  &FalseFuncU8<>,
1005  &FalseFuncI32<>,
1006  &FalseFuncU8<>);
1007 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
DataType GetDataType() const
Definition: Tensor.hpp:198
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & weights,
const TensorInfo & biases,
const FullyConnectedDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1009 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonFullyConnectedWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1015 {
1018  input,
1019  output,
1020  weights,
1021  biases,
1022  descriptor,
1023  nullptr);
1024 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const Optional< TensorInfo > &biases, const FullyConnectedDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported 
) const

Definition at line 1040 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherNdWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1044 {
1047  input0,
1048  input1,
1049  output);
1050 }
const TensorInfo & output
arm_compute::Status NeonGatherNdWorkloadValidate(const TensorInfo &paramsInfo, const TensorInfo &indicesInfo, const TensorInfo &outputInfo)
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const GatherDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1026 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonGatherWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1031 {
1034  input0,
1035  input1,
1036  output,
1037  descriptor);
1038 }
arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo &input, const TensorInfo &indices, const TensorInfo &output, const GatherDescriptor &descriptor)
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo & input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1052 of file NeonLayerSupport.cpp.

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1054 {
1055  return IsNeonBackendSupported(reasonIfUnsupported, input);
1056 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const InstanceNormalizationDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1058 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonInstanceNormalizationWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1062 {
1065  input,
1066  output,
1067  descriptor);
1068 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonInstanceNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const L2NormalizationDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1070 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonL2NormalizationWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1074 {
1076 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType & type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor & descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmParamsInfo,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 154 of file NeonLayerSupport.cpp.

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, ILayerSupport::descriptor, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, NeonLayerSupport::IsActivationSupported(), NeonLayerSupport::IsAdditionSupported(), NeonLayerSupport::IsArgMinMaxSupported(), NeonLayerSupport::IsBatchMatMulSupported(), NeonLayerSupport::IsBatchNormalizationSupported(), NeonLayerSupport::IsBatchToSpaceNdSupported(), NeonLayerSupport::IsCastSupported(), NeonLayerSupport::IsChannelShuffleSupported(), NeonLayerSupport::IsComparisonSupported(), NeonLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConstantSupported(), NeonLayerSupport::IsConvertBf16ToFp32Supported(), NeonLayerSupport::IsConvertFp16ToFp32Supported(), NeonLayerSupport::IsConvertFp32ToBf16Supported(), NeonLayerSupport::IsConvertFp32ToFp16Supported(), NeonLayerSupport::IsConvolution2dSupported(), NeonLayerSupport::IsConvolution3dSupported(), NeonLayerSupport::IsDepthToSpaceSupported(), NeonLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsDequantizeSupported(), LayerSupportBase::IsDetectionPostProcessSupported(), NeonLayerSupport::IsDivisionSupported(), NeonLayerSupport::IsElementwiseUnarySupported(), NeonLayerSupport::IsFillSupported(), NeonLayerSupport::IsFloorSupported(), NeonLayerSupport::IsFullyConnectedSupported(), NeonLayerSupport::IsGatherNdSupported(), NeonLayerSupport::IsGatherSupported(), NeonLayerSupport::IsInputSupported(), 
NeonLayerSupport::IsInstanceNormalizationSupported(), NeonLayerSupport::IsL2NormalizationSupported(), NeonLayerSupport::IsLogicalBinarySupported(), NeonLayerSupport::IsLogSoftmaxSupported(), NeonLayerSupport::IsLstmSupported(), NeonLayerSupport::IsMaximumSupported(), NeonLayerSupport::IsMeanSupported(), LayerSupportBase::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), NeonLayerSupport::IsMinimumSupported(), NeonLayerSupport::IsMultiplicationSupported(), NeonLayerSupport::IsNormalizationSupported(), NeonLayerSupport::IsOutputSupported(), NeonLayerSupport::IsPadSupported(), NeonLayerSupport::IsPermuteSupported(), NeonLayerSupport::IsPooling2dSupported(), NeonLayerSupport::IsPooling3dSupported(), NeonLayerSupport::IsPreluSupported(), NeonLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQuantizedLstmSupported(), NeonLayerSupport::IsQuantizeSupported(), NeonLayerSupport::IsReduceSupported(), NeonLayerSupport::IsReshapeSupported(), NeonLayerSupport::IsResizeSupported(), LayerSupportBase::IsShapeSupported(), NeonLayerSupport::IsSliceSupported(), NeonLayerSupport::IsSoftmaxSupported(), NeonLayerSupport::IsSpaceToBatchNdSupported(), NeonLayerSupport::IsSpaceToDepthSupported(), NeonLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsStackSupported(), NeonLayerSupport::IsStridedSliceSupported(), NeonLayerSupport::IsSubtractionSupported(), NeonLayerSupport::IsTransposeConvolution2dSupported(), NeonLayerSupport::IsTransposeSupported(), NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::PolymorphicDowncast(), armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, 
ILayerSupport::reasonIfUnsupported, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Shape, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by NeonLayerSupport::~NeonLayerSupport().

160 {
161  switch (type)
162  {
164  return IsActivationSupported(infos[0],
165  infos[1],
166  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
168  case LayerType::Addition:
169  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
171  return IsArgMinMaxSupported(infos[0],
172  infos[1],
173  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
176  return IsBatchMatMulSupported(infos[0],
177  infos[1],
178  infos[2],
179  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
182  return IsBatchNormalizationSupported(infos[0],
183  infos[1],
184  infos[2],
185  infos[3],
186  infos[4],
187  infos[5],
188  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
189  (&descriptor)),
192  return IsBatchToSpaceNdSupported(infos[0],
193  infos[1],
194  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
196  case LayerType::Cast:
197  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
199  return IsChannelShuffleSupported(infos[0],
200  infos[1],
201  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
204  return IsComparisonSupported(infos[0],
205  infos[1],
206  infos[2],
207  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
209  case LayerType::Concat:
210  {
211  std::vector<const TensorInfo*> inputInfos;
212  for (uint32_t i = 0; i < (infos.size() - 1); i++)
213  {
214  inputInfos.push_back(&infos[i]);
215  }
216  return IsConcatSupported(inputInfos,
217  infos[infos.size() - 1],
218  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
220  }
221  case LayerType::Constant:
222  return IsConstantSupported(infos[0], reasonIfUnsupported);
224  return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
226  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
228  return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
230  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
232  {
233  if (infos.size() != 4)
234  {
235  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
236  "TensorInfos should be of format: {input, output, weights, biases}.");
237  }
238 
239  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
240  if (infos[3] == TensorInfo())
241  {
242  return IsConvolution2dSupported(infos[0],
243  infos[1],
244  desc,
245  infos[2],
246  EmptyOptional(),
248  }
249  else
250  {
251  return IsConvolution2dSupported(infos[0],
252  infos[1],
253  desc,
254  infos[2],
255  infos[3],
257  }
258  }
260  {
261  if (infos.size() != 4)
262  {
263  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
264  "TensorInfos should be of format: {input, output, weights, biases}.");
265  }
266 
267  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
268  if (infos[3] == TensorInfo())
269  {
270  return IsConvolution3dSupported(infos[0],
271  infos[1],
272  desc,
273  infos[2],
274  EmptyOptional(),
276  }
277  else
278  {
279  return IsConvolution3dSupported(infos[0],
280  infos[1],
281  desc,
282  infos[2],
283  infos[3],
285  }
286  }
288  return IsDepthToSpaceSupported(infos[0],
289  infos[1],
290  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
293  {
294  if (infos.size() != 4)
295  {
296  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
297  "TensorInfos should be of format: {input, output, weights, biases}.");
298  }
299 
300  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
301  if (infos[3] == TensorInfo())
302  {
303  return IsDepthwiseConvolutionSupported(infos[0],
304  infos[1],
305  desc,
306  infos[2],
307  EmptyOptional(),
309  }
310  else
311  {
312  return IsDepthwiseConvolutionSupported(infos[0],
313  infos[1],
314  desc,
315  infos[2],
316  infos[3],
318  }
319  }
321  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
323  {
324  auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
326  infos[1],
327  infos[2],
328  infos[3],
329  infos[4],
330  infos[5],
331  infos[6],
332  desc,
334  }
335  case LayerType::Division:
336  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
338  return IsElementwiseUnarySupported(infos[0],
339  infos[1],
340  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
342  case LayerType::Fill:
343  return IsFillSupported(infos[0],
344  infos[1],
345  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
347  case LayerType::Floor:
348  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
350  return IsFullyConnectedSupported(infos[0],
351  infos[1],
352  infos[2],
353  infos[3],
354  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
356  case LayerType::Gather:
357  return IsGatherSupported(infos[0],
358  infos[1],
359  infos[2],
360  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
362  case LayerType::GatherNd:
363  return IsGatherNdSupported(infos[0],
364  infos[1],
365  infos[2],
367  case LayerType::Input:
368  return IsInputSupported(infos[0], reasonIfUnsupported);
370  return IsInstanceNormalizationSupported(infos[0],
371  infos[1],
372  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
373  (&descriptor)),
376  return IsL2NormalizationSupported(infos[0],
377  infos[1],
378  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
381  return IsLogicalBinarySupported(infos[0],
382  infos[1],
383  infos[2],
384  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
387  return IsLogSoftmaxSupported(infos[0],
388  infos[1],
389  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
391  case LayerType::Lstm:
392  return IsLstmSupported(infos[0],
393  infos[1],
394  infos[2],
395  infos[3],
396  infos[4],
397  infos[5],
398  infos[6],
399  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
400  lstmParamsInfo.value(),
402  case LayerType::Map:
403  return true;
404  case LayerType::Maximum:
405  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
406  case LayerType::Mean:
407  return IsMeanSupported(infos[0],
408  infos[1],
409  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
411  case LayerType::MemCopy:
412  return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
415  case LayerType::Merge:
416  return LayerSupportBase::IsMergeSupported(infos[0],
417  infos[1],
418  infos[2],
420  case LayerType::Minimum:
421  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
423  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
425  return IsNormalizationSupported(infos[0],
426  infos[1],
427  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
429  case LayerType::Output:
430  return IsOutputSupported(infos[0], reasonIfUnsupported);
431  case LayerType::Pad:
432  return IsPadSupported(infos[0],
433  infos[1],
434  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
436  case LayerType::Permute:
437  return IsPermuteSupported(infos[0],
438  infos[1],
439  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
442  return IsPooling2dSupported(infos[0],
443  infos[1],
444  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
447  return IsPooling3dSupported(infos[0],
448  infos[1],
449  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
451  case LayerType::Prelu:
452  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
453  case LayerType::QLstm:
454  return IsQLstmSupported(infos[0],
455  infos[1],
456  infos[2],
457  infos[3],
458  infos[4],
459  infos[5],
460  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
461  lstmParamsInfo.value(),
463  case LayerType::Quantize:
464  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
466  return IsQuantizedLstmSupported(infos[0],
467  infos[1],
468  infos[2],
469  infos[3],
470  infos[4],
471  quantizedLstmParamsInfo.value(),
473  case LayerType::Rank:
474  return true;
475  case LayerType::Reshape:
476  return IsReshapeSupported(infos[0],
477  infos[1],
478  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
480  case LayerType::Resize:
481  return IsResizeSupported(infos[0],
482  infos[1],
483  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
485  case LayerType::Reduce:
486  return IsReduceSupported(infos[0],
487  infos[1],
488  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
490  case LayerType::Shape:
491  return LayerSupportBase::IsShapeSupported(infos[0],
492  infos[1],
494  case LayerType::Slice:
495  return IsSliceSupported(infos[0],
496  infos[1],
497  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
499  case LayerType::Softmax:
500  return IsSoftmaxSupported(infos[0],
501  infos[1],
502  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
505  return IsSpaceToBatchNdSupported(infos[0],
506  infos[1],
507  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
510  return IsSpaceToDepthSupported(infos[0],
511  infos[1],
512  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
514  case LayerType::Splitter:
515  {
516  std::vector<TensorInfo> outputInfos;
517  for (uint32_t i = 1; i < infos.size(); i++)
518  {
519  outputInfos.push_back(infos[i]);
520  }
521  return IsSplitterSupported(infos[0],
522  {outputInfos.begin(), outputInfos.end()},
523  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
525  }
526  case LayerType::Stack:
527  {
528  std::vector<const TensorInfo*> inputInfos;
529  for (uint32_t i = 0; i < infos.size() - 1; i++)
530  {
531  inputInfos.push_back(&infos[i]);
532  }
533  return IsStackSupported(inputInfos,
534  infos[infos.size() - 1],
535  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
537  }
539  return IsStridedSliceSupported(infos[0],
540  infos[1],
541  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
544  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
546  return IsTransposeSupported(infos[0],
547  infos[1],
548  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
551  {
552  if (infos.size() != 4)
553  {
554  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
555  "TensorInfos should be of format: {input, output, weights, biases}.");
556  }
557 
558  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
559  if (infos[3] == TensorInfo())
560  {
561  return IsTransposeConvolution2dSupported(infos[0],
562  infos[1],
563  desc,
564  infos[2],
565  EmptyOptional(),
567  }
568  else
569  {
570  return IsTransposeConvolution2dSupported(infos[0],
571  infos[1],
572  desc,
573  infos[2],
574  infos[3],
576  }
577  }
580  infos[1],
581  infos[2],
582  infos[3],
583  infos[4],
584  infos[5],
585  *(PolymorphicDowncast<const
587  lstmParamsInfo.value(),
589  case LayerType::Unmap:
590  return true;
591  default:
592  // layers not supported in neon by default:
593  // debug, fakequantization, precompiled,
594  // standin, switch
595  return false;
596  }
597 }
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const ActivationDescriptor & descriptor
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
LstmDescriptor UnidirectionalSequenceLstmDescriptor
DestType PolymorphicDowncast(SourceType *value)
Polymorphic downcast for build in pointers only.
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const LogicalBinaryDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1078 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::LogicalAnd, armnn::LogicalOr, LogicalBinaryDescriptor::m_Operation, armnn::NeonLogicalAndWorkloadValidate(), and armnn::NeonLogicalOrWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1083 {
1084  switch(descriptor.m_Operation)
1085  {
1089  input0,
1090  input1,
1091  output);
1095  input0,
1096  input1,
1097  output);
1098  default:
1099  return false;
1100  }
1101 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const LogSoftmaxDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1103 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLogSoftmaxWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1107 {
1109 }
const TensorInfo & output
arm_compute::Status NeonLogSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor)
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & scratchBuffer,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const LstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1111 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonLstmFloatWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1121 {
1124  input,
1125  outputStateIn,
1126  cellStateIn,
1127  scratchBuffer,
1129  cellStateOut,
1130  output,
1131  descriptor,
1132  paramsInfo);
1133 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1135 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMaximumWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1139 {
1142  input0,
1143  input1,
1144  output);
1145 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonMaximumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo & input,
const TensorInfo & output,
const MeanDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1147 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMeanWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1151 {
1154  input,
1155  output,
1156  descriptor);
1157 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonMeanWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1159 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMinimumWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1163 {
1166  input0,
1167  input1,
1168  output);
1169 }
const TensorInfo & output
arm_compute::Status NeonMinimumWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output)
Validation function that checks the inputs and output.
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1171 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonMultiplicationWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1175 {
1178  input0,
1179  input1,
1180  output,
1181  nullptr);
1182 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonMultiplicationWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const NormalizationDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1197 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonNormalizationWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1201 {
1204  input,
1205  output,
1206  descriptor);
1207 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1209 of file NeonLayerSupport.cpp.

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1211 {
1212  return IsNeonBackendSupported(reasonIfUnsupported, output);
1213 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo & input,
const TensorInfo & output,
const PadDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1215 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPadWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1219 {
1222  input,
1223  output,
1224  descriptor);
1225 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonPadWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo & input,
const TensorInfo & output,
const PermuteDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1227 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPermuteWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1231 {
1233 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonPermuteWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Pooling2dDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1235 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling2dWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1239 {
1241 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor)

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Pooling3dDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1243 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPooling3dWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1247 {
1249 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonPooling3dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor)
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo & input,
const TensorInfo & alpha,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1251 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonPreluWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1255 {
1257 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & alpha
arm_compute::Status NeonPreluWorkloadValidate(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo & input,
const TensorInfo & previousOutputIn,
const TensorInfo & previousCellStateIn,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const QLstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1259 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonQLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1268 {
1269  // Check required here in order to pass IsLayerSupported for datatypes tests
1270  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1276  {
1279  input,
1282  cellStateOut,
1284  output,
1285  descriptor,
1286  paramsInfo);
1287  }
1288  else
1289  {
1290  return false;
1291  }
1292 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonQLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo & previousCellStateIn
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
const TensorInfo & previousOutputIn
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsQuantizedLstmSupported()

bool IsQuantizedLstmSupported ( const TensorInfo & input,
const TensorInfo & cellStateIn,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateOut,
const TensorInfo & outputStateOut,
const QuantizedLstmInputParamsInfo & paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1304 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizedLstmWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1311 {
1314  input,
1315  cellStateIn,
1316  outputStateIn,
1317  cellStateOut,
1319  paramsInfo);
1320 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & outputStateIn
arm_compute::Status NeonQuantizedLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &cellStateIn, const TensorInfo &outputStateIn, const TensorInfo &cellStateOut, const TensorInfo &outputStateOut, const QuantizedLstmInputParamsInfo &paramsInfo)
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1294 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonQuantizeWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1297 {
1300  input,
1301  output);
1302 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo & input,
const TensorInfo & output,
const ReduceDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1322 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonReduceWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1326 {
1329  input,
1330  output,
1331  descriptor);
1332 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonReduceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo & input,
const TensorInfo & output,
const ReshapeDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1334 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, armnn::IgnoreUnused(), and armnn::NeonReshapeWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1338 {
1342  input,
1343  output);
1344 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo &input, const TensorInfo &output)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo & input,
const TensorInfo & output,
const ResizeDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1346 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonResizeWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1350 {
1353  input,
1354  output,
1355  descriptor);
1356 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor)

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo & input,
const TensorInfo & output,
const SliceDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1358 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSliceWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1362 {
1365  input,
1366  output,
1367  descriptor);
1368 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor)
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const SoftmaxDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1370 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSoftmaxWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1374 {
1376 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonSoftmaxWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToBatchNdDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1378 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToBatchNdWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1382 {
1385  input,
1386  output,
1387  descriptor);
1388 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonSpaceToBatchNdWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo & input,
const TensorInfo & output,
const SpaceToDepthDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1390 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSpaceToDepthWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1394 {
1397  input,
1398  output,
1399  descriptor);
1400 }
arm_compute::Status NeonSpaceToDepthWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor)
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo & input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1402 of file NeonLayerSupport.cpp.

References armnn::ComputeSplitAxis(), FORWARD_WORKLOAD_VALIDATE_FUNC, ViewsDescriptor::GetNumDimensions(), TensorInfo::GetShape(), armnn::IgnoreUnused(), TensorInfo::IsTypeSpaceMatch(), armnn::NeonSplitterWorkloadValidate(), ILayerSupport::output, ILayerSupport::outputs, and armnn::SetValueChecked().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1406 {
1407 #if defined(ARMCOMPUTENEON_ENABLED)
1408  // Split along the last dimension, cannot use sub-tensors
1409  // as width and height of the sub-tensors do not match
1410  // the width and height of the parent tensor
1411  // in case of input with more than 2D.
1412  std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1413  if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1414  *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1415  {
1418  input,
1419  outputs,
1420  *splitAxis.begin());
1421  }
1422 #endif
1424  for (auto output : outputs)
1425  {
1426  if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1427  {
1428  SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
1429  return false;
1430  }
1431  }
1432  return true;
1433 }
arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, unsigned int splitAxis)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
const TensorInfo & output
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
std::set< unsigned int > ComputeSplitAxis(const armnn::SplitterDescriptor &desc, const TensorShape &input)
void SetValueChecked(Optional< T &> optionalRef, V &&val)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo & output,
const StackDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1435 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStackWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1439 {
1442  inputs,
1443  output,
1444  descriptor);
1445 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonStackWorkloadValidate(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo & input,
const TensorInfo & output,
const StridedSliceDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1447 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonStridedSliceWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1451 {
1454  input,
1455  output,
1456  descriptor);
1457 }
const TensorInfo & output
arm_compute::Status NeonStridedSliceWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor)
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1459 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonSubtractionWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1463 {
1466  input0,
1467  input1,
1468  output,
1469  nullptr);
1470 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ActivationDescriptor *activationDescriptor)
const TensorInfo & input1
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1472 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeConvolution2dWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1478 {
1481  input,
1482  output,
1483  descriptor,
1484  weights,
1485  biases);
1486 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1488 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, and armnn::NeonTransposeWorkloadValidate().

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1492 {
1494 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
arm_compute::Status NeonTransposeWorkloadValidate(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & outputStateOut,
const TensorInfo & cellStateOut,
const TensorInfo & output,
const UnidirectionalSequenceLstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1496 of file NeonLayerSupport.cpp.

References FORWARD_WORKLOAD_VALIDATE_FUNC, TensorInfo::GetDataType(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), armnn::QAsymmS8, and armnn::QSymmS16.

Referenced by NeonLayerSupport::IsLayerSupported(), and NeonLayerSupport::~NeonLayerSupport().

1505 {
1506  if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1512  {
1515  input,
1516  outputStateIn,
1517  cellStateIn,
1519  cellStateOut,
1520  output,
1521  descriptor,
1522  paramsInfo);
1523  }
1524  else
1525  {
1528  input,
1529  outputStateIn,
1530  cellStateIn,
1532  cellStateOut,
1533  output,
1534  descriptor,
1535  paramsInfo);
1536  }
1537 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
arm_compute::Status NeonUnidirectionalSequenceLstmWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
arm_compute::Status NeonUnidirectionalSequenceLstmFloatWorkloadValidate(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo)
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported,...)

The documentation for this class was generated from the following files: