ArmNN 22.08
RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
RefLayerSupport derives from LayerSupportBase, which in turn derives from ILayerSupport.

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from ILayerSupport
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsActivationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsAdditionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsArgMinMaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchToSpaceNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsCastSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsChannelShuffleSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsComparisonSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertBf16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp32ToBf16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp32ToFp16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDebugSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthToSpaceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDequantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDivisionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsElementwiseUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFakeQuantizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFillSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFloorSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFullyConnectedSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsGatherSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInputSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInstanceNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsL2NormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalBinarySupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMaximumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMeanSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemCopySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemImportSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMergeSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMinimumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMultiplicationSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsOutputSupported(const TensorInfo &output
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPadSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPermuteSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreCompiledSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreluSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizedLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsRankSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReduceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReshapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsResizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsShapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSliceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToBatchNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToDepthSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSplitterSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSubtractionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSwitchSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input
 

Additional Inherited Members

- Public Attributes inherited from ILayerSupport
const TensorInfooutput
 
const TensorInfo const ActivationDescriptordescriptor
 
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoinput1
 
const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ArgMinMaxDescriptordescriptor
 
const TensorInfo const ArgMinMaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfomean
 
const TensorInfo const TensorInfo const TensorInfovar
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfobeta
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfogamma
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const BatchToSpaceNdDescriptordescriptor
 
const TensorInfo const BatchToSpaceNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ChannelShuffleDescriptordescriptor
 
const TensorInfo const ChannelShuffleDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const ComparisonDescriptordescriptor
 
const TensorInfo const TensorInfo const ComparisonDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConcatSupported(const std Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution2dDescriptordescriptor
 
const TensorInfo const Convolution2dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution3dDescriptordescriptor
 
const TensorInfo const Convolution3dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthToSpaceDescriptordescriptor
 
const TensorInfo const DepthToSpaceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthwiseConvolution2dDescriptordescriptor
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoscores
 
const TensorInfo const TensorInfoanchors
 
const TensorInfo const TensorInfo const TensorInfodetectionBoxes
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionClasses
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionScores
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfonumDetections
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const =0
 
const TensorInfo const ElementwiseUnaryDescriptordescriptor
 
const TensorInfo const ElementwiseUnaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const FakeQuantizationDescriptordescriptor
 
const FakeQuantizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const FillDescriptordescriptor
 
const TensorInfo const FillDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoweights
 
const TensorInfo const TensorInfo const TensorInfobiases
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const GatherDescriptordescriptor
 
const TensorInfo const TensorInfo const GatherDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const InstanceNormalizationDescriptordescriptor
 
const TensorInfo const InstanceNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const L2NormalizationDescriptordescriptor
 
const TensorInfo const L2NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptordescriptor
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const LogSoftmaxDescriptordescriptor
 
const TensorInfo const LogSoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfooutputStateIn
 
const TensorInfo const TensorInfocellStateIn
 
const TensorInfo const TensorInfo const TensorInfoscratchBuffer
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const MeanDescriptordescriptor
 
const TensorInfo const MeanDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoouput
 
const TensorInfo const NormalizationDescriptordescriptor
 
const TensorInfo const NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PadDescriptordescriptor
 
const TensorInfo const PadDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PermuteDescriptordescriptor
 
const TensorInfo const PermuteDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling2dDescriptordescriptor
 
const TensorInfo const Pooling2dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling3dDescriptordescriptor
 
const TensorInfo const Pooling3dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const PreCompiledDescriptordescriptor
 
const PreCompiledDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoalpha
 
const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReduceDescriptordescriptor
 
const TensorInfo const ReduceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReshapeDescriptordescriptor
 
const TensorInfo const ReshapeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ResizeDescriptordescriptor
 
const TensorInfo const ResizeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SliceDescriptordescriptor
 
const TensorInfo const SliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SoftmaxDescriptordescriptor
 
const TensorInfo const SoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToBatchNdDescriptordescriptor
 
const TensorInfo const SpaceToBatchNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToDepthDescriptordescriptor
 
const TensorInfo const SpaceToDepthDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptordescriptor
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfooutput
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptordescriptor
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfooutput0
 
const TensorInfo const TensorInfo const TensorInfooutput1
 
const TensorInfo const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeConvolution2dDescriptordescriptor
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeDescriptordescriptor
 
const TensorInfo const TransposeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo input,
const TensorInfo output,
const ActivationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 512 of file RefLayerSupport.cpp.

References armnn::Abs, armnn::BFloat16, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

Referenced by RefLayerSupport::IsLayerSupported().

516 {
517  bool supported = true;
518 
519  // Define supported types.
520  std::array<DataType,6> supportedTypes = {
527  };
528 
529  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
530  "Reference activation: input type not supported.");
531 
532  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
533  "Reference activation: output type not supported.");
534 
535  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
536  "Reference activation: input and output types mismatched.");
537 
538  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
539  "Reference activation: input and output shapes are of different rank.");
540 
541 
542  struct ActivationFunctionSupported : public Rule
543  {
544  ActivationFunctionSupported(const ActivationDescriptor& desc)
545  {
546  switch(desc.m_Function)
547  {
560  {
561  m_Res = true;
562  break;
563  }
564  default:
565  {
566  m_Res = false;
567  break;
568  }
569  }
570  }
571  };
572 
573  // Function is supported
574  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
575  "Reference activation: function not supported.");
576 
577  return supported;
578 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
min(a, max(b, input)) ReLu1 & ReLu6.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 580 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported(), and TEST_SUITE().

584 {
585  bool supported = true;
586 
587  std::array<DataType,7> supportedTypes = {
595  };
596 
597  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
598  "Reference addition: input 0 is not a supported type.");
599 
600  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
601  "Reference addition: input 1 is not a supported type.");
602 
603  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
604  "Reference addition: output is not a supported type.");
605 
606  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
607  "Reference addition: input 0 and Input 1 types are mismatched");
608 
609  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
610  "Reference addition: input and output types are mismatched");
611 
612  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
613  "Reference addition: shapes are not suitable for implicit broadcast.");
614 
615  return supported;
616 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo input,
const TensorInfo output,
const ArgMinMaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 618 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

621 {
623 
624  std::array<DataType, 8> supportedInputTypes =
625  {
634  };
635 
636  std::array<DataType,2> supportedOutputTypes = {
638  DataType::Signed64
639  };
640 
641  bool supported = true;
642 
643  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
644  "Reference ArgMinMax: input is not a supported type.");
645  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
646  "Reference ArgMinMax: output type not supported");
647 
648  return supported;
649 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo inputX,
const TensorInfo inputY,
const TensorInfo output,
const BatchMatMulDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 651 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

656 {
658 
659  std::array<DataType, 6> supportedTypes =
660  {
667  };
668 
669  bool supported = true;
670 
671  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
672  "Reference batch matrix multiplication: input X is not a supported type");
673 
674  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
675  "Reference batch matrix multiplication: input Y is not a supported type");
676 
677  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
678  "Reference batch matrix multiplication: output is not a supported type");
679 
680  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
681  "Reference batch matrix multiplication: input X and input Y types are mismatched");
682 
683  supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
684  "Reference batch matrix multiplication: inputs and output types are mismatched");
685 
686  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputX, 2),
688  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
689 
690  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputY, 2),
692  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
693 
694  return supported;
695 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  mean,
const TensorInfo &  var,
const TensorInfo &  beta,
const TensorInfo &  gamma,
const BatchNormalizationDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 697 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

705 {
707 
708  std::array<DataType, 6> supportedTypes =
709  {
716  };
717 
718  bool supported = true;
719 
720  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
721  "Reference batch normalization: input is not a supported type.");
722 
723  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
724  "Reference batch normalization: output is not a supported type.");
725 
726  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
727  "Reference batch normalization: input and output types are mismatched");
728 
729  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
730  "Reference batch normalization: mean is not a supported type.");
731 
732  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
733  "Reference batch normalization: variance is not a supported type.");
734 
735  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
736  "Reference batch normalization: beta is not a supported type.");
737 
738  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
739  "Reference batch normalization: gamma is not a supported type.");
740 
741  return supported;
742 }
const TensorInfo & output
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const TensorInfo & mean

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const BatchToSpaceNdDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 744 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

748 {
750 
751  bool supported = true;
752 
753  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
754  std::string inputTensorStr = "input";
755  std::string outputTensorStr = "output";
756 
757  // Define supported types.
758  std::array<DataType,6> supportedTypes =
759  {
766  };
767 
768  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
769  "Reference BatchToSpaceNd: input type not supported.");
770 
771  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
772  "Reference BatchToSpaceNd: output type not supported.");
773 
774  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
775  "Reference BatchToSpaceNd: input and output types mismatched.");
776 
777  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
779  CreateIncorrectDimensionsErrorMsg(4,
781  batchToSpaceNdLayerStr,
782  outputTensorStr).data());
783 
784  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
786  CreateIncorrectDimensionsErrorMsg(4,
787  input.GetNumDimensions(),
788  batchToSpaceNdLayerStr,
789  inputTensorStr).data());
790 
791  return supported;
792 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 794 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

797 {
798  std::array<DataType, 9> supportedInputTypes =
799  {
808  };
809 
810  bool supported = true;
811  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
812  "Reference cast: input is not a supported type");
813 
814 
815  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
816  "Reference cast: output is not a supported type");
817 
818  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
819  "Reference cast: input and output shapes have different number of total elements");
820 
821  return supported;
822 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ChannelShuffleDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 824 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

828 {
830  bool supported = true;
831 
832  // Define supported output and inputs types.
833  std::array<DataType, 7> supportedTypes =
834  {
842  };
843 
844  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
845  "Reference ChannelShuffle: input is not a supported type.");
846 
847  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
848  "Reference ChannelShuffle: output is not a supported type.");
849 
850  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
851  "Reference ChannelShuffle: input and output types are mismatched.");
852 
853  return supported;
854 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const ComparisonDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 857 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

862 {
864  std::array<DataType, 8> supportedInputTypes =
865  {
874  };
875 
876  bool supported = true;
877  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
878  "Reference comparison: input 0 is not a supported type");
879 
880  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
881  "Reference comparison: input 0 and Input 1 types are mismatched");
882 
884  "Reference comparison: output is not of type Boolean");
885 
886  return supported;
887 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo &  output,
const OriginsDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 889 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

893 {
895 
896  bool supported = true;
897  std::array<DataType,7> supportedTypes =
898  {
906  };
907 
908  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
909  "Reference concatenation: output type not supported");
910  for (const TensorInfo* input : inputs)
911  {
912  ARMNN_ASSERT(input != nullptr);
913  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
914  "Reference concatenation: input type not supported");
915 
916  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
917  "Reference concatenation: input and output types mismatched.");
918  }
919 
920  return supported;
921 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 923 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

925 {
926  std::array<DataType,8> supportedTypes =
927  {
936  };
937 
938  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
939  "Reference constant: output is not a supported type.");
940 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 942 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

Referenced by RefLayerSupport::IsLayerSupported().

945 {
946  bool supported = true;
947 
948  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
949  "Reference for ConvertBf16ToFp32 layer: input type not supported");
950 
952  "Reference for ConvertBf16ToFp32 layer: output type not supported");
953 
954  return supported;
955 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 957 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

960 {
962  input.GetDataType(),
963  &TrueFunc<>,
964  &FalseInputFuncF32<>,
965  &FalseFuncU8<>,
966  &FalseFuncI32<>,
967  &FalseFuncU8<>) &&
970  &FalseOutputFuncF16<>,
971  &TrueFunc<>,
972  &FalseFuncU8<>,
973  &FalseFuncI32<>,
974  &FalseFuncU8<>));
975 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
DataType GetDataType() const
Definition: Tensor.hpp:198
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 977 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

Referenced by RefLayerSupport::IsLayerSupported().

980 {
981  bool supported = true;
982 
983  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
984  "Reference for ConvertFp32ToBf16 layer: input type not supported");
985 
987  "Reference for ConvertFp32ToBf16 layer: output type not supported");
988 
989  return supported;
990 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 992 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

995 {
997  input.GetDataType(),
998  &FalseInputFuncF16<>,
999  &TrueFunc<>,
1000  &FalseFuncU8<>,
1001  &FalseFuncI32<>,
1002  &FalseFuncU8<>) &&
1004  output.GetDataType(),
1005  &TrueFunc<>,
1006  &FalseOutputFuncF32<>,
1007  &FalseFuncU8<>,
1008  &FalseFuncI32<>,
1009  &FalseFuncU8<>));
1010 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
DataType GetDataType() const
Definition: Tensor.hpp:198
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1012 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1018 {
1019  bool supported = true;
1020 
1021  // Define supported types.
1022  std::array<DataType,7> supportedTypes =
1023  {
1031  };
1032 
1033  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1034  "Reference Convolution2d: input is not a supported type.");
1035 
1036  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1037  "Reference Convolution2d: output is not a supported type.");
1038 
1039  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1040  if (input.GetDataType() == DataType::BFloat16)
1041  {
1043  {
1044  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1045  supported = false;
1046  }
1047  }
1048  else
1049  {
1050  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1051  "Reference Convolution2d: input and output types mismatched.");
1052  }
1053 
1054  const DataType inputType = input.GetDataType();
1055  if (IsQuantized8BitType(inputType))
1056  {
1057  std::array<DataType, 3> supportedWeightTypes =
1058  {
1062  };
1063 
1064  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1065  "Reference Convolution2d: weights type not supported for quantized input.");
1066  }
1067  else
1068  {
1069  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1070  "Reference Convolution2d: weights is not a supported type.");
1071 
1072  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1073  "Reference Convolution2d: input and weights types mismatched.");
1074  }
1075 
1076  if (biases.has_value())
1077  {
1078  std::array<DataType,4> biasesSupportedTypes =
1079  {
1084  };
1085 
1086  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1087  "Reference Convolution2d: biases is not a supported type.");
1088  }
1090 
1091  return supported;
1092 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution3dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1094 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1100 {
1101  bool supported = true;
1102 
1103  // Define supported types.
1104  std::array<DataType,7> supportedTypes =
1105  {
1113  };
1114 
1115  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1116  "Reference Convolution3d: input is not a supported type.");
1117 
1118  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1119  "Reference Convolution3d: output is not a supported type.");
1120 
1121  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1122  "Reference Convolution3d: input and output types mismatched.");
1123 
1124  const DataType inputType = input.GetDataType();
1125  if (IsQuantized8BitType(inputType))
1126  {
1127  std::array<DataType, 3> supportedWeightTypes =
1128  {
1132  };
1133 
1134  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1135  "Reference Convolution3d: weights type not supported for quantized input.");
1136  }
1137  else
1138  {
1139  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1140  "Reference Convolution3d: weights is not a supported type.");
1141 
1142  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1143  "Reference Convolution3d: input and weights types mismatched.");
1144  }
1145 
1146  if (biases.has_value())
1147  {
1148  std::array<DataType,4> biasesSupportedTypes =
1149  {
1154  };
1155 
1156  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1157  "Reference Convolution3d: biases is not a supported type.");
1158  }
1160 
1161  return supported;
1162 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1164 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1167 {
1168  bool supported = true;
1169 
1170  std::array<DataType, 8> supportedTypes =
1171  {
1180  };
1181 
1182  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1183  "Reference for Debug layer: input type not supported");
1184 
1185  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1186  "Reference for Debug layer: output type not supported");
1187 
1188  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1189  "Reference for Debug layer: input and output types are mismatched");
1190 
1191  return supported;
1192 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthToSpaceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1194 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1198 {
1200  bool supported = true;
1201 
1202  std::array<DataType,6> supportedTypes =
1203  {
1210  };
1211 
1212  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1213  "Reference DepthToSpace: input type not supported");
1214 
1215  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1216  "Reference DepthToSpace: output type not supported");
1217 
1218  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1219  "Reference DepthToSpace: input and output types are mismatched");
1220 
1221  return supported;
1222 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1224 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().

1230 {
1232  bool supported = true;
1233 
1234  // Define supported types.
1235  std::array<DataType,7> supportedTypes =
1236  {
1244  };
1245 
1246  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1247  "Reference DepthwiseConvolution2d: input is not a supported type.");
1248 
1249  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1250  "Reference DepthwiseConvolution2d: output is not a supported type.");
1251 
1252  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1253  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1254 
1255  const DataType inputType = input.GetDataType();
1256  if (IsQuantized8BitType(inputType))
1257  {
1258  std::array<DataType, 3> supportedWeightTypes =
1259  {
1263  };
1264 
1265  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1266  "Reference DepthwiseConvolution2d: weights type not supported for "
1267  "quantized input.");
1268  }
1269  else
1270  {
1271  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1272  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1273 
1274  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1275  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1276  }
1277 
1278  if (biases.has_value())
1279  {
1280  std::array<DataType,4> biasesSupportedTypes =
1281  {
1286  };
1287  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1288  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1289  }
1290 
1291  return supported;
1292 
1293 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1295 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

1298 {
1299  bool supported = true;
1300 
1301  std::array<DataType,5> supportedInputTypes = {
1307  };
1308 
1309  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1310  "Reference for Dequantize layer: input type not supported.");
1311 
1312  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1313  "Reference for Dequantize layer: per-axis quantized input not supported.");
1314 
1315  std::array<DataType,3> supportedOutputTypes = {
1318  DataType::Float16
1319  };
1320 
1321  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1322  "Reference for Dequantize layer: output type not supported.");
1323 
1324  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1325  "Reference for Dequantize layer: input/output shapes have different num total "
1326  "elements.");
1327 
1328  return supported;
1329 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo &  boxEncodings,
const TensorInfo &  scores,
const TensorInfo &  anchors,
const TensorInfo &  detectionBoxes,
const TensorInfo &  detectionClasses,
const TensorInfo &  detectionScores,
const TensorInfo &  numDetections,
const DetectionPostProcessDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1331 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1340 {
1342 
1343  bool supported = true;
1344 
1345  std::array<DataType,6> supportedInputTypes =
1346  {
1353  };
1354 
1355  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1356  "Reference DetectionPostProcess: input 0 is not a supported type.");
1357 
1358  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1359  "Reference DetectionPostProcess: input 1 is not a supported type.");
1360 
1361  return supported;
1362 }
const TensorInfo const TensorInfo & anchors
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & scores
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1364 of file RefLayerSupport.cpp.

References RefLayerSupport::IsDepthwiseConvolutionSupported().

1370 {
1372 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1374 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1378 {
1379  bool supported = true;
1380 
1381  std::array<DataType,7> supportedTypes = {
1389  };
1390 
1391  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1392  "Reference division: input 0 is not a supported type.");
1393 
1394  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1395  "Reference division: input 1 is not a supported type.");
1396 
1397  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1398  "Reference division: output is not a supported type.");
1399 
1400  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
 1401  "Reference division: input 0 and input 1 types are mismatched");
1402 
1403  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1404  "Reference division: input and output types are mismatched");
1405 
1406  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1407  "Reference division: shapes are not suitable for implicit broadcast.");
1408 
1409  return supported;
1410 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1412 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1416 {
1418 
1419  std::array<DataType, 7> supportedTypes =
1420  {
1428  };
1429 
1430  std::array<DataType, 1> logicalSupportedTypes =
1431  {
1433  };
1434 
1435  bool supported = true;
1436 
1437  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1438  {
1439  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1440  "Reference elementwise unary: input type not supported");
1441 
1442  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1443  "Reference elementwise unary: output type not supported");
1444  }
1445  else
1446  {
1447  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1448  "Reference elementwise unary: input type not supported");
1449 
1450  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1451  "Reference elementwise unary: output type not supported");
1452  }
1453 
1454  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1455  "Reference elementwise unary: input and output types not matching");
1456 
1457  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
 1458  "Reference elementwise unary: input and output shapes "
 1459  "have different number of total elements");
1460 
1461  return supported;
1462 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo input,
const FakeQuantizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1464 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1467 {
1469  bool supported = true;
1470 
1471  std::array<DataType,1> supportedTypes =
1472  {
1474  };
1475 
1476  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1477  "Reference fake quantization: input type not supported.");
1478 
1479  return supported;
1480 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo input,
const TensorInfo output,
const FillDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1482 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1486 {
1489 
1490  bool supported = true;
1491 
1492  std::array<DataType,3> supportedTypes =
1493  {
1497  };
1498 
1499  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1500  "Reference Fill: input type not supported.");
1501 
1502  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1503  "Reference Fill: output type not supported.");
1504  return supported;
1505 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1507 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1510 {
1512  bool supported = true;
1513 
1514  std::array<DataType,3> supportedTypes =
1515  {
1519  };
1520 
1521  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1522  "Reference Floor: input type not supported.");
1523 
1524  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1525  "Reference Floor: output type not supported.");
1526 
1527  return supported;
1528 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo weights,
const TensorInfo biases,
const FullyConnectedDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1530 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1536 {
1537  bool supported = true;
1538 
1539  // Define supported types.
1540  std::array<DataType,6> supportedTypes =
1541  {
1548  };
1549 
1550  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1551  "Reference Fully Connected: input type not supported.");
1552 
1553  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1554  "Reference Fully Connected: output type not supported.");
1555 
1556  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1557  "Reference Fully Connected: weights type not supported.");
1558 
1559  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1560  if (input.GetDataType() == DataType::BFloat16)
1561  {
1563  {
1564  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1565  supported = false;
1566  }
1567  }
1568  else
1569  {
1570  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1571  "Reference Fully Connected: input and output types mismatched.");
1572  }
1573 
1574  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1575  "Reference Fully Connected: weights is not a supported type.");
1576 
1577  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1578  "Reference Fully Connected: input and weights types mismatched.");
1579 
1580  if (descriptor.m_BiasEnabled)
1581  {
1582  // Defined supported types for bias
1583  std::array<DataType, 5>
1584  supportedBiasTypes =
1585  {
1591  };
1592 
1593  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1594  "Reference Fully Connected: bias type not supported.");
1595 
1596  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1597  "Reference Fully Connected: bias and weight types mismatch.");
1598 
1599  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1600  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1601 
1602  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1603  "Reference Fully Connected: bias must have 1 dimension.");
1604 
1605  }
1606 
1607  return supported;
1608 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1610 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1614 {
1615  bool supported = true;
1616  std::array<DataType,7> supportedTypes =
1617  {
1625  };
1626 
1627  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1628  "Reference GatherNd: input type not supported");
1629 
1630  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1631  "Reference GatherNd: output type not supported");
1632 
1633  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1634  "Reference GatherNd: indices (input1) type not supported");
1635 
1636  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1637  "Reference GatherNd: input and output types not matching");
1638 
1639  return supported;
1640 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1642 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, GatherDescriptor::m_Axis, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1647 {
1648  bool supported = true;
1649  std::array<DataType,7> supportedTypes =
1650  {
1658  };
1659 
1660  if (descriptor.m_Axis != 0)
1661  {
1662  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1663  supported &= false;
1664  }
1665  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1666  "Reference Gather: input type not supported");
1667 
1668  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1669  "Reference Gather: output type not supported");
1670 
1671  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1672  "Reference Gather: indices (input1) type not supported");
1673 
1674  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1675  "Reference Gather: input and output types not matching");
1676 
1677  return supported;
1678 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1680 of file RefLayerSupport.cpp.

Referenced by RefLayerSupport::IsLayerSupported().

1682 {
1683  return true;
1684 }

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1686 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1690 {
1692  // Define supported types
1693  std::array<DataType, 3> supportedTypes =
1694  {
1698  };
1699 
1700  bool supported = true;
1701 
1702  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1703  "Reference Instance Normalization: input type not supported.");
1704 
1705  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1706  "Reference Instance Normalization: output type not supported.");
1707 
1708  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1709  "Reference Instance Normalization: input and output types mismatched.");
1710 
1711  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1712  "Reference Instance Normalization: input and output shapes have different "
1713  "num total elements.");
1714 
1715  return supported;
1716 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1718 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1722 {
1724  // Define supported types
1725  std::array<DataType, 6> supportedTypes =
1726  {
1733  };
1734 
1735  bool supported = true;
1736 
1737  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1738  "Reference L2normalization: input type not supported.");
1739 
1740  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1741  "Reference L2normalization: output type not supported.");
1742 
1743  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1744  "Reference L2normalization: input and output types mismatched.");
1745 
1746  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1747  "Reference L2normalization: input and output shapes have different "
1748  "num total elements.");
1749 
1750  return supported;
1751 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmInputParamsInfo,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, ILayerSupport::descriptor, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchMatMulSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertBf16ToFp32Supported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToBf16Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherNdSupported(), 
RefLayerSupport::IsGatherSupported(), RefLayerSupport::IsInputSupported(), RefLayerSupport::IsInstanceNormalizationSupported(), RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QLstm, armnn::Quantize, 
armnn::QuantizedLstm, armnn::Rank, ILayerSupport::reasonIfUnsupported, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Shape, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
104  return IsComparisonSupported(infos[0],
105  infos[1],
106  infos[2],
107  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
109  case LayerType::Concat:
110  {
111  std::vector<const TensorInfo*> inputInfos;
112  for (uint32_t i = 0; i < (infos.size() - 1); i++)
113  {
114  inputInfos.push_back(&infos[i]);
115  }
116  return IsConcatSupported(inputInfos,
117  infos[infos.size() - 1],
118  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
120  }
121  case LayerType::Constant:
122  return IsConstantSupported(infos[0], reasonIfUnsupported);
124  return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
126  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
128  return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
130  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
132  {
133  if (infos.size() != 4)
134  {
135  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
136  "TensorInfos should be of format: {input, output, weights, biases}.");
137  }
138 
139  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
140  if (infos[3] == TensorInfo())
141  {
142  return IsConvolution2dSupported(infos[0],
143  infos[1],
144  desc,
145  infos[2],
146  EmptyOptional(),
148  }
149  else
150  {
151  return IsConvolution2dSupported(infos[0],
152  infos[1],
153  desc,
154  infos[2],
155  infos[3],
157  }
158  }
160  return IsDepthToSpaceSupported(infos[0],
161  infos[1],
162  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
165  {
166  if (infos.size() != 4)
167  {
168  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
169  "TensorInfos should be of format: {input, output, weights, biases}.");
170  }
171 
172  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
173  if (infos[3] == TensorInfo())
174  {
175  return IsDepthwiseConvolutionSupported(infos[0],
176  infos[1],
177  desc,
178  infos[2],
179  EmptyOptional(),
181  }
182  else
183  {
184  return IsDepthwiseConvolutionSupported(infos[0],
185  infos[1],
186  desc,
187  infos[2],
188  infos[3],
190  }
191  }
193  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
194  case LayerType::Division:
195  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
197  return IsElementwiseUnarySupported(infos[0],
198  infos[1],
199  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
201  case LayerType::Fill:
202  return IsFillSupported(infos[0],
203  infos[1],
204  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
206  case LayerType::Floor:
207  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
209  return IsFullyConnectedSupported(infos[0],
210  infos[1],
211  infos[2],
212  infos[3],
213  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
215  case LayerType::Gather:
216  return IsGatherSupported(infos[0],
217  infos[1],
218  infos[2],
219  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
221  case LayerType::GatherNd:
222  return IsGatherNdSupported(infos[0],
223  infos[1],
224  infos[2],
226  case LayerType::Input:
227  return IsInputSupported(infos[0], reasonIfUnsupported);
229  return IsInstanceNormalizationSupported(infos[0],
230  infos[1],
231  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
232  (&descriptor)),
235  return IsL2NormalizationSupported(infos[0],
236  infos[1],
237  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
240  return IsLogicalBinarySupported(infos[0],
241  infos[1],
242  infos[2],
243  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
246  return IsLogSoftmaxSupported(infos[0],
247  infos[1],
248  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
250  case LayerType::Lstm:
251  return IsLstmSupported(infos[0],
252  infos[1],
253  infos[2],
254  infos[3],
255  infos[4],
256  infos[5],
257  infos[6],
258  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
259  lstmParamsInfo.value(),
261  case LayerType::QLstm:
262  return IsQLstmSupported(infos[0],
263  infos[1],
264  infos[2],
265  infos[3],
266  infos[4],
267  infos[5],
268  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
269  lstmParamsInfo.value(),
271  case LayerType::Maximum:
272  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
273  case LayerType::Mean:
274  return IsMeanSupported(infos[0],
275  infos[1],
276  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
278  case LayerType::Minimum:
279  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
281  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
283  return IsNormalizationSupported(infos[0],
284  infos[1],
285  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
287  case LayerType::Output:
288  return IsOutputSupported(infos[0], reasonIfUnsupported);
289  case LayerType::Pad:
290  return IsPadSupported(infos[0],
291  infos[1],
292  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
294  case LayerType::Permute:
295  return IsPermuteSupported(infos[0],
296  infos[1],
297  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
300  return IsPooling2dSupported(infos[0],
301  infos[1],
302  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
304  case LayerType::Prelu:
305  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
306  case LayerType::Quantize:
307  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
308  case LayerType::Reshape:
309  return IsReshapeSupported(infos[0],
310  infos[1],
311  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
313  case LayerType::Resize:
314  return IsResizeSupported(infos[0],
315  infos[1],
316  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
318  case LayerType::Reduce:
319  return IsReduceSupported(infos[0],
320  infos[1],
321  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
323  case LayerType::Slice:
324  return IsSliceSupported(infos[0],
325  infos[1],
326  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
328  case LayerType::Softmax:
329  return IsSoftmaxSupported(infos[0],
330  infos[1],
331  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
334  return IsSpaceToBatchNdSupported(infos[0],
335  infos[1],
336  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
339  return IsSpaceToDepthSupported(infos[0],
340  infos[1],
341  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
343  case LayerType::Splitter:
344  {
345  std::vector<TensorInfo> outputInfos;
346  for (uint32_t i = 1; i < infos.size(); i++)
347  {
348  outputInfos.push_back(infos[i]);
349  }
350  return IsSplitterSupported(infos[0],
351  {outputInfos.begin(), outputInfos.end()},
352  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
354  }
355  case LayerType::Stack:
356  {
357  std::vector<const TensorInfo*> inputInfos;
358  for (uint32_t i = 0; i < infos.size() - 1; i++)
359  {
360  inputInfos.push_back(&infos[i]);
361  }
362  return IsStackSupported(inputInfos,
363  infos[infos.size() - 1],
364  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
366  }
368  return IsStridedSliceSupported(infos[0],
369  infos[1],
370  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
373  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
375  return IsTransposeSupported(infos[0],
376  infos[1],
377  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
380  {
381  if (infos.size() != 4)
382  {
383  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
384  "TensorInfos should be of format: {input, output, weights, biases}.");
385  }
386 
387  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
388  if (infos[3] == TensorInfo())
389  {
390  return IsTransposeConvolution2dSupported(infos[0],
391  infos[1],
392  desc,
393  infos[2],
394  EmptyOptional(),
396  }
397  else
398  {
399  return IsTransposeConvolution2dSupported(infos[0],
400  infos[1],
401  desc,
402  infos[2],
403  infos[3],
405  }
406  }
407  case LayerType::Cast:
408  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
410  return IsChannelShuffleSupported(infos[0],
411  infos[1],
412  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
415  {
416  if (infos.size() != 4)
417  {
418  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
419  "TensorInfos should be of format: {input, output, weights, biases}.");
420  }
421 
422  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
423  if (infos[3] == TensorInfo())
424  {
425  return IsConvolution3dSupported(infos[0],
426  infos[1],
427  desc,
428  infos[2],
429  EmptyOptional(),
431  }
432  else
433  {
434  return IsConvolution3dSupported(infos[0],
435  infos[1],
436  desc,
437  infos[2],
438  infos[3],
440  }
441  }
442  case LayerType::Debug:
443  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
445  return IsDetectionPostProcessSupported(infos[0],
446  infos[1],
447  infos[2],
448  infos[3],
449  infos[4],
450  infos[5],
451  infos[6],
452  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
453  (&descriptor)),
456  return IsFakeQuantizationSupported(infos[0],
457  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
459  case LayerType::MemCopy:
460  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
461  case LayerType::Rank:
462  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
463  case LayerType::Shape:
464  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
466  {
467  if (infos.size() != 6)
468  {
469  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
470  "should be of format: {input, outputStateIn, cellStateIn, "
471  "hiddenStateOutputVal, cellStateOutputVal, output}");
472  }
473  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
475  infos[1],
476  infos[2],
477  infos[3],
478  infos[4],
479  infos[5],
480  desc,
481  lstmParamsInfo.value(),
483  }
485  return IsPooling3dSupported(infos[0],
486  infos[1],
487  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
489  case LayerType::Map:
490  return true;
491  case LayerType::Unmap:
492  return true;
495  case LayerType::Merge:
496  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
499  infos[1],
500  infos[2],
501  infos[3],
502  infos[4],
503  quantizedLstmInputParamsInfo.value(),
505  default:
 506  // layers not supported in the reference backend by default:
 507  // precompiled, standin, switch
508  return false;
509  }
510 }
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const ActivationDescriptor & descriptor
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1753 of file RefLayerSupport.cpp.

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1758 {
1760 
1761  std::array<DataType, 1> supportedTypes =
1762  {
1764  };
1765 
1766  bool supported = true;
1767  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1768  "Reference LogicalBinary: input 0 type not supported");
1769  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1770  "Reference LogicalBinary: input 1 type not supported");
1771 
1772  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1773  "Reference LogicalBinary: input and output types do not match");
1774 
1775  return supported;
1776 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1778 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1782 {
1784 
1785  std::array<DataType, 3> supportedTypes =
1786  {
1790  };
1791 
1792  bool supported = true;
1793  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1794  "Reference LogSoftmax: input type not supported");
1795 
1796  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1797  "Reference LogSoftmax: output type not supported");
1798 
1799  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1800  "Reference LogSoftmax: input and output types do not match");
1801 
1802  return supported;
1803 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1805 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1815 {
1818 
1819  bool supported = true;
1820 
1821  std::array<DataType,3> supportedTypes = {
1825  };
1826 
1827  // check inputs and outputs
1828  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1829  "Reference Lstm: input is not a supported type.");
1830  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1831  "Reference Lstm: input and outputStateIn types are mismatched");
1832  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1833  "Reference Lstm: input and cellStateIn types are mismatched");
1834  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1835  "Reference Lstm: input and scratchBuffer types are mismatched");
1836  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1837  "Reference Lstm: input and outputStateOut types are mismatched");
1838  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1839  "Reference Lstm: input and cellStateOut types are mismatched");
1840 
1841  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1842  "Reference Lstm: input and output types are mismatched");
1843  // check layer parameters
1844  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1845  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1846  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1847  "Reference Lstm: input and InputToCellWeights types are mismatched");
1848  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1849  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1850  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1851  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1852  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1853  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1854  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1855  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1856  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1857  "Reference Lstm: input and ForgetGateBias types are mismatched");
1858  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1859  "Reference Lstm: input and CellBias types are mismatched");
1860  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1861  "Reference Lstm: input and OutputGateBias types are mismatched");
1862  if (!descriptor.m_CifgEnabled)
1863  {
1864  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1865  "Reference Lstm: input and InputToInputWeights types are mismatched");
1866  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1868  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1869  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1870  "Reference Lstm: input and InputGateBias types are mismatched");
1871  if (descriptor.m_PeepholeEnabled)
1872  {
1873  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1875  "Reference Lstm: input and CellToInputWeights types are mismatched");
1876  }
1877  }
1878  if (descriptor.m_PeepholeEnabled)
1879  {
1880  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1881  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1882  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1883  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1884  }
1885  if (descriptor.m_ProjectionEnabled)
1886  {
1887  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1888  "Reference Lstm: input and mProjectionWeights types are mismatched");
1889  if (paramsInfo.m_ProjectionBias != nullptr)
1890  {
1891  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1892  "Reference Lstm: input and ProjectionBias types are mismatched");
1893  }
1894  }
1895  if (descriptor.m_LayerNormEnabled)
1896  {
1897  if (!descriptor.m_CifgEnabled)
1898  {
1899  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1901  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1902  }
1903  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1905  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1906  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1908  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1909  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1911  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1912  }
1913 
1914  return supported;
1915 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1917 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1921 {
1922  bool supported = true;
1923 
1924  std::array<DataType,7> supportedTypes = {
1932  };
1933 
1934  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1935  "Reference maximum: input 0 is not a supported type.");
1936 
1937  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1938  "Reference maximum: input 1 is not a supported type.");
1939 
1940  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1941  "Reference maximum: output is not a supported type.");
1942 
1943  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1944  "Reference maximum: input 0 and Input 1 types are mismatched");
1945 
1946  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1947  "Reference maximum: input and output types are mismatched");
1948 
1949  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1950  "Reference maximum: shapes are not suitable for implicit broadcast.");
1951 
1952  return supported;
1953 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1955 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::numeric_cast(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1959 {
1960  bool supported = true;
1961  std::string meanLayerStr = "Mean";
1962  std::string outputTensorStr = "output";
1963 
1964  std::array<DataType,6> supportedTypes =
1965  {
1972  };
1973 
1974  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1975  "Reference Mean: input type not supported.");
1976 
1977  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1978  "Reference Mean: input and output types are mismatched");
1979 
1980  if (descriptor.m_KeepDims)
1981  {
1982  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1984  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1986  meanLayerStr, outputTensorStr).data());
1987  }
1988  else if (descriptor.m_Axis.empty())
1989  {
1990  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1992  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1993  meanLayerStr, outputTensorStr).data());
1994  }
1995  else
1996  {
1997  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1998 
1999  if (outputDim > 0)
2000  {
2001  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
2003  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
2004  meanLayerStr, outputTensorStr).data());
2005  }
2006  else
2007  {
2008  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
2010  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
2011  meanLayerStr, outputTensorStr).data());
2012  }
2013  }
2014 
2015  return supported;
2016 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2018 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2021 {
2022  bool supported = true;
2023 
2024  std::array<DataType,7> supportedTypes =
2025  {
2033  };
2034 
2035  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2036  "Reference MemCopy: input type not supported");
2037 
2038  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2039  "Reference MemCopy: output type not supported");
2040 
2041  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2042  "Reference MemCopy: input and output types are mismatched");
2043 
2044  return supported;
2045 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2047 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2051 {
2052  bool supported = true;
2053 
2054  std::array<DataType,7> supportedTypes = {
2062  };
2063 
2064  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2065  "Reference minimum: input 0 is not a supported type.");
2066 
2067  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2068  "Reference minimum: input 1 is not a supported type.");
2069 
2070  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2071  "Reference minimum: output is not a supported type.");
2072 
2073  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2074  "Reference minimum: input 0 and Input 1 types are mismatched");
2075 
2076  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2077  "Reference minimum: input and output types are mismatched");
2078 
2079  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2080  "Reference minimum: shapes are not suitable for implicit broadcast.");
2081 
2082  return supported;
2083 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2085 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2089 {
2090  bool supported = true;
2091 
2092  std::array<DataType,7> supportedTypes = {
2100  };
2101 
2102  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2103  "Reference multiplication: input 0 is not a supported type.");
2104 
2105  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2106  "Reference multiplication: input 1 is not a supported type.");
2107 
2108  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2109  "Reference multiplication: output is not a supported type.");
2110 
2111  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2112  "Reference multiplication: input 0 and Input 1 types are mismatched");
2113 
2114  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2115  "Reference multiplication: input and output types are mismatched");
2116 
2117  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2118  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2119 
2120  return supported;
2121 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const NormalizationDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2123 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2127 {
2129 
2130  // Define supported types
2131  std::array<DataType, 6> supportedTypes =
2132  {
2139  };
2140 
2141  bool supported = true;
2142 
2143  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2144  "Reference normalization: input type not supported.");
2145 
2146  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2147  "Reference normalization: output type not supported.");
2148 
2149  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2150  "Reference normalization: input and output shapes have different "
2151  "num total elements.");
2152 
2153  return supported;
2154 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2156 of file RefLayerSupport.cpp.

Referenced by RefLayerSupport::IsLayerSupported().

2158 {
2159  return true;
2160 }

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PadDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2162 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2166 {
2168  bool supported = true;
2169 
2170  // Define supported output and inputs types.
2171  std::array<DataType,6> supportedTypes =
2172  {
2179  };
2180 
2181  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2182  "Reference pad: input is not a supported type.");
2183 
2184  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2185  "Reference pad: output is not a supported type.");
2186 
2187  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2188  "Reference pad: input and output types are mismatched.");
2189 
2190  return supported;
2191 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PermuteDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2193 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2197 {
2199  bool supported = true;
2200 
2201  // Define supported output and inputs types.
2202  std::array<DataType, 6> supportedTypes =
2203  {
2210  };
2211 
2212  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2213  "Reference permute: input is not a supported type.");
2214 
2215  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2216  "Reference permute: output is not a supported type.");
2217 
2218  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2219  "Reference permute: input and output types are mismatched.");
2220 
2221  return supported;
2222 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling2dDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2224 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2228 {
2230  bool supported = true;
2231 
2232  // Define supported output and inputs types.
2233  std::array<DataType,6> supportedTypes =
2234  {
2241  };
2242 
2243  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2244  "Reference poolind2d: input is not a supported type.");
2245 
2246  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2247  "Reference poolind2d: output is not a supported type.");
2248 
2249  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2250  "Reference poolind2d: input and output types are mismatched.");
2251 
2252  return supported;
2253 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling3dDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2255 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2259 {
2261  bool supported = true;
2262 
2263  // Define supported output and inputs types.
2264  std::array<DataType,6> supportedTypes =
2265  {
2272  };
2273 
2274  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2275  "Reference poolind3d: input is not a supported type.");
2276 
2277  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2278  "Reference poolind3d: output is not a supported type.");
2279 
2280  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2281  "Reference poolind3d: input and output types are mismatched.");
2282 
2283  return supported;
2284 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo &  input,
const TensorInfo &  alpha,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2714 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2718 {
2719  bool supported = true;
2720 
2721  std::array<DataType, 6> supportedTypes
2722  {
2729  };
2730 
2731  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2732  "PReLU: input is not a supported type.");
2733 
2734  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2735  "PReLU: alpha is not a supported type.");
2736 
2737  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2738  "PReLU: output is not a supported type.");
2739 
2740  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2741  "PReLU: input, alpha and output types are mismatched");
2742 
2743  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2744  "PReLU: shapes are not suitable for implicit broadcast");
2745 
2746  return supported;
2747 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & alpha
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo &  input,
const TensorInfo &  previousOutputIn,
const TensorInfo &  previousCellStateIn,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const QLstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2287 of file RefLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

2296 {
2297  IgnoreUnused(input);
2305 
2307 
2308  return true;
2309 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo & previousCellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
const TensorInfo & previousOutputIn

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2311 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

2314 {
2315  bool supported = true;
2316 
2317  // Define supported input types.
2318  std::array<DataType,7> supportedInputTypes = {
2326  };
2327 
2328  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2329  "Reference quantize: input type not supported.");
2330 
2331  // Define supported output types.
2332  std::array<DataType,4> supportedOutputTypes = {
2336  DataType::QSymmS16
2337  };
2338  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2339  "Reference quantize: output type not supported.");
2340 
2341  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2342  "Reference quantize: input and output shapes have different num total elements.");
2343 
2344  return supported;
2345 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2347 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2350 {
2351  IgnoreUnused(input);
2352  // Define supported output types.
2353  std::array<DataType,1> supportedOutputTypes =
2354  {
2356  };
2357 
2358  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2359  "Reference rank: input type not supported.");
2360 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReduceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2362 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2366 {
2368  bool supported = true;
2369  std::array<DataType,7> supportedTypes =
2370  {
2378  };
2379 
2380  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2381  "Reference Reduce: input type not supported");
2382 
2383  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2384  "Reference Reduce: output type not supported");
2385 
2386  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2387  "Reference Reduce: input and output types not matching");
2388 
2389  return supported;
2390 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReshapeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2392 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2396 {
2399  // Define supported output types.
2400  std::array<DataType,8> supportedOutputTypes =
2401  {
2410  };
2411 
2412  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2413  "Reference reshape: input type not supported.");
2414 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ResizeDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2416 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2420 {
2422  bool supported = true;
2423  std::array<DataType,6> supportedTypes =
2424  {
2431  };
2432 
2433  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2434  "Reference Resize: input type not supported");
2435 
2436  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2437  "Reference Resize: output type not supported");
2438 
2439  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2440  "Reference Resize: input and output types not matching");
2441 
2442  return supported;
2443 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2445 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2448 {
2449  IgnoreUnused(input);
2450  bool supported = true;
2451 
2452  std::array<DataType, 1> supportedTypes =
2453  {
2455  };
2456 
2457  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2458  "Reference Shape: output type not supported");
2459 
2460  return supported;
2461 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SliceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2463 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2467 {
2469  bool supported = true;
2470 
2471  std::array<DataType, 5> supportedTypes =
2472  {
2478  };
2479 
2480  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2481  "Reference Slice: input type not supported");
2482 
2483  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2484  "Reference Slice: output type not supported");
2485 
2486  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2487  "Reference Slice: input and output types are mismatched");
2488 
2489  return supported;
2490 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SoftmaxDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2492 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

2496 {
2498  bool supported = true;
2499  std::array<DataType,7> supportedTypes =
2500  {
2508  };
2509 
2510  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2511  "Reference Softmax: output type not supported");
2512 
2513  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2514  "Reference Softmax: input type not supported");
2515 
2516  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2517  "Reference Softmax: input type not supported");
2518 
2519  return supported;
2520 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToBatchNdDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2522 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2526 {
2528  bool supported = true;
2529  std::array<DataType,6> supportedTypes =
2530  {
2537  };
2538 
2539  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2540  "Reference SpaceToBatchNd: input type not supported");
2541 
2542  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2543  "Reference SpaceToBatchNd: output type not supported");
2544 
2545  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2546  "Reference SpaceToBatchNd: input and output types are mismatched");
2547 
2548  return supported;
2549 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToDepthDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2551 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2555 {
2556 
2558  bool supported = true;
2559 
2560  std::array<DataType,6> supportedTypes =
2561  {
2568  };
2569 
2570  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2571  "Reference SpaceToDepth: input type not supported");
2572 
2573  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2574  "Reference SpaceToDepth: output type not supported");
2575 
2576  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2577  "Reference SpaceToDepth: input and output types are mismatched");
2578 
2579  return supported;
2580 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo &  input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2582 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputs, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2586 {
2588  bool supported = true;
2589  std::array<DataType,6> supportedTypes =
2590  {
2597  };
2598 
2599  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2600  "Reference splitter: output type not supported");
2601  for (const TensorInfo& output : outputs)
2602  {
2603  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2604  "Reference splitter: input type not supported");
2605 
2606  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2607  "Reference splitter: input and output types mismatched.");
2608  }
2609 
2610  return supported;
2611 }
const TensorInfo & output
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo &  output,
const StackDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2613 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2617 {
2619 
2620  bool supported = true;
2621  std::array<DataType,7> supportedTypes =
2622  {
2630  };
2631 
2632  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2633  "Reference stack: output type not supported");
2634  for (const TensorInfo* input : inputs)
2635  {
2636  ARMNN_ASSERT(input != nullptr);
2637  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2638  "Reference stack: input type not supported");
2639 
2640  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2641  "Reference stack: input and output types mismatched.");
2642  }
2643 
2644  return supported;
2645 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const StridedSliceDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2647 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2651 {
2653  bool supported = true;
2654 
2655  std::array<DataType,5> supportedTypes =
2656  {
2662  };
2663 
2664  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2665  "Reference StridedSlice: input type not supported");
2666 
2667  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2668  "Reference StridedSlice: output type not supported");
2669 
2670  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2671  "Reference StridedSlice: input and output types are mismatched");
2672 
2673  return supported;
2674 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2676 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2680 {
2681  bool supported = true;
2682 
2683  std::array<DataType,7> supportedTypes = {
2691  };
2692 
2693  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2694  "Reference subtraction: input 0 is not a supported type.");
2695 
2696  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2697  "Reference subtraction: input 1 is not a supported type.");
2698 
2699  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2700  "Reference subtraction: output is not a supported type.");
2701 
2702  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2703  "Reference subtraction: input 0 and Input 1 types are mismatched");
2704 
2705  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2706  "Reference subtraction: input and output types are mismatched");
2707 
2708  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2709  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2710 
2711  return supported;
2712 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2749 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

2755 {
2756  IgnoreUnused(descriptor);
2757  bool supported = true;
2758 
2759  std::array<DataType,7> supportedTypes =
2760  {
2761      DataType::BFloat16,
2762      DataType::Float32,
2763      DataType::Float16,
2764      DataType::QAsymmS8,
2765      DataType::QAsymmU8,
2766      DataType::QSymmS8,
2767      DataType::QSymmS16
2768  };
2769 
2770  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2771  "Reference TransposeConvolution2d: input is not a supported type.");
2772 
2773  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2774  "Reference TransposeConvolution2d: output is not a supported type.");
2775 
2776  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2777  "Reference TransposeConvolution2d: input and output types mismatched.");
2778 
2779 
2780  const DataType inputType = input.GetDataType();
2781  if (IsQuantized8BitType(inputType))
2782  {
2783  std::array<DataType, 3> supportedWeightTypes =
2784  {
2785      DataType::QAsymmS8,
2786      DataType::QAsymmU8,
2787      DataType::QSymmS8
2788  };
2789 
2790  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2791  "Reference TransposeConvolution2d: weights type not supported for "
2792  "quantized input.");
2793  }
2794  else
2795  {
2796  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2797  "Reference TransposeConvolution2d: weights is not a supported type.");
2798 
2799  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2800  "Reference TransposeConvolution2d: input and weights types mismatched.");
2801  }
2802 
2803  if (biases.has_value())
2804  {
2805  std::array<DataType,4> biasesSupportedTypes =
2806  {
2807      DataType::BFloat16,
2808      DataType::Float32,
2809      DataType::Float16,
2810      DataType::Signed32
2811  };
2812  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2813  "Reference TransposeConvolution2d: biases is not a supported type.");
2814  }
2815 
2816  return supported;
2817 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2819 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2823 {
2824  IgnoreUnused(descriptor);
2825  bool supported = true;
2826 
2827  // Define supported output and inputs types.
2828  std::array<DataType, 6> supportedTypes =
2829  {
2830      DataType::BFloat16,
2831      DataType::Float32,
2832      DataType::Float16,
2833      DataType::QAsymmS8,
2834      DataType::QAsymmU8,
2835      DataType::QSymmS16
2836  };
2837 
2838  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2839  "Reference transpose: input is not a supported type.");
2840 
2841  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2842  "Reference transpose: output is not a supported type.");
2843 
2844  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2845  "Reference transpose: input and output types are mismatched.");
2846 
2847  return supported;
2848 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const UnidirectionalSequenceLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2850 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, armnn::QAsymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2860 {
2867  bool supported = true;
2868 
2869  std::array<DataType, 2> supportedTypes =
2870  {
2871      DataType::Float32,
2872      DataType::QAsymmS8
2873  };
2874 
2875  std::array<DataType, 2> supportedWeightTypes =
2876  {
2877      DataType::Float32,
2878      DataType::QAsymmS8
2879  };
2880 
2881  std::array<DataType, 3> supportedBiasTypes =
2882  {
2883      DataType::Float32,
2884      DataType::QAsymmS8,
2885      DataType::Signed32
2886  };
2887 
2888  // check inputs and outputs
2889  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2890  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2891  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2892  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2893 
2894  // check layer parameters
2895  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2896  reasonIfUnsupported,
2897  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2898  "is not a supported type.");
2899  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2900  reasonIfUnsupported,
2901  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2902  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2903  reasonIfUnsupported,
2904  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2905  "is not a supported type.");
2906  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2907  reasonIfUnsupported,
2908  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2909  "is not a supported type.");
2910  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2911  reasonIfUnsupported,
2912  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2913  "is not a supported type.");
2914  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2915  reasonIfUnsupported,
2916  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2917  "is not a supported type.");
2918 
2919  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2920  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2921  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2922  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2923  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2924  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2925  if (!descriptor.m_CifgEnabled)
2926  {
2927  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2928  reasonIfUnsupported,
2929  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2930  "is not a supported type.");
2931  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2932  reasonIfUnsupported,
2933  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2934  "is not a supported type.");
2935  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2936  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2937  if (descriptor.m_PeepholeEnabled)
2938  {
2939  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2940  reasonIfUnsupported,
2941  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2942  "is not a supported type.");
2943  }
2944  }
2945  if (descriptor.m_PeepholeEnabled)
2946  {
2947  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2948  reasonIfUnsupported,
2949  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2950  "is not a supported type.");
2951  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2952  reasonIfUnsupported,
2953  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2954  "is not a supported type.");
2955  }
2956  if (descriptor.m_ProjectionEnabled)
2957  {
2958  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2959  reasonIfUnsupported,
2960  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2961  "is not a supported type.");
2962  if (paramsInfo.m_ProjectionBias != nullptr)
2963  {
2964  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2965  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2966  "are mismatched");
2967  }
2968  }
2969  if (descriptor.m_LayerNormEnabled)
2970  {
2971  if (!descriptor.m_CifgEnabled)
2972  {
2973  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2974  reasonIfUnsupported,
2975  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2976  "is not a supported type.");
2977  }
2978  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2979  reasonIfUnsupported,
2980  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2981  "is not a supported type.");
2982  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2983  reasonIfUnsupported,
2984  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2985  "is not a supported type.");
2986  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2987  reasonIfUnsupported,
2988  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2989  "is not a supported type.");
2990  }
2991 
2992  return supported;
2993 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

The documentation for this class was generated from the following files: