ArmNN 22.02
RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
RefLayerSupport → LayerSupportBase → ILayerSupport

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &output, const Optional< TensorInfo > &hiddenStateOutput, const Optional< TensorInfo > &cellStateOutput, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &output, const Optional< TensorInfo > &hiddenStateOutput, const Optional< TensorInfo > &cellStateOutput, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from ILayerSupport
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsActivationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsAdditionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsArgMinMaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsBatchNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsBatchToSpaceNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsCastSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsChannelShuffleSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsComparisonSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertBf16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertFp32ToBf16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertFp16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertFp32ToFp16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvolution3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDebugSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDepthToSpaceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDequantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDivisionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsElementwiseUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFakeQuantizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFillSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFloorSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFullyConnectedSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsGatherSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsInputSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsInstanceNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsL2NormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLogicalBinarySupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLogicalUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLogSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMaximumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMeanSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMemCopySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMemImportSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMergeSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMinimumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMultiplicationSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsOutputSupported(const TensorInfo &output
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPadSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPermuteSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPooling2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPooling3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPreCompiledSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPreluSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsQuantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsQLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsQuantizedLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsRankSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsReduceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsReshapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsResizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsShapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSliceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSpaceToBatchNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSpaceToDepthSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSplitterSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSubtractionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSwitchSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsTransposeConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsTransposeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input
 

Additional Inherited Members

- Public Attributes inherited from ILayerSupport
const TensorInfo & output
 
const TensorInfo const ActivationDescriptor & descriptor
 
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo & input1
 
const TensorInfo const TensorInfo & output
 
const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ArgMinMaxDescriptor & descriptor
 
const TensorInfo const ArgMinMaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo & mean
 
const TensorInfo const TensorInfo const TensorInfo & var
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor & descriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const BatchToSpaceNdDescriptor & descriptor
 
const TensorInfo const BatchToSpaceNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ChannelShuffleDescriptor & descriptor
 
const TensorInfo const ChannelShuffleDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const ComparisonDescriptor & descriptor
 
const TensorInfo const TensorInfo const ComparisonDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConcatSupported(const std Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution2dDescriptordescriptor
 
const TensorInfo const Convolution2dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution3dDescriptordescriptor
 
const TensorInfo const Convolution3dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthToSpaceDescriptordescriptor
 
const TensorInfo const DepthToSpaceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthwiseConvolution2dDescriptordescriptor
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoscores
 
const TensorInfo const TensorInfoanchors
 
const TensorInfo const TensorInfo const TensorInfodetectionBoxes
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionClasses
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionScores
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfonumDetections
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const =0
 
const TensorInfo const ElementwiseUnaryDescriptordescriptor
 
const TensorInfo const ElementwiseUnaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const FakeQuantizationDescriptordescriptor
 
const FakeQuantizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const FillDescriptordescriptor
 
const TensorInfo const FillDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoweights
 
const TensorInfo const TensorInfo const TensorInfobiases
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const GatherDescriptordescriptor
 
const TensorInfo const TensorInfo const GatherDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const InstanceNormalizationDescriptordescriptor
 
const TensorInfo const InstanceNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const L2NormalizationDescriptordescriptor
 
const TensorInfo const L2NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptordescriptor
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const LogSoftmaxDescriptordescriptor
 
const TensorInfo const LogSoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfooutputStateIn
 
const TensorInfo const TensorInfocellStateIn
 
const TensorInfo const TensorInfo const TensorInfoscratchBuffer
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const MeanDescriptordescriptor
 
const TensorInfo const MeanDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoouput
 
const TensorInfo const NormalizationDescriptordescriptor
 
const TensorInfo const NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PadDescriptordescriptor
 
const TensorInfo const PadDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PermuteDescriptordescriptor
 
const TensorInfo const PermuteDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling2dDescriptordescriptor
 
const TensorInfo const Pooling2dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling3dDescriptordescriptor
 
const TensorInfo const Pooling3dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const PreCompiledDescriptordescriptor
 
const PreCompiledDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoalpha
 
const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReduceDescriptordescriptor
 
const TensorInfo const ReduceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReshapeDescriptordescriptor
 
const TensorInfo const ReshapeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ResizeDescriptordescriptor
 
const TensorInfo const ResizeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SliceDescriptordescriptor
 
const TensorInfo const SliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SoftmaxDescriptordescriptor
 
const TensorInfo const SoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToBatchNdDescriptordescriptor
 
const TensorInfo const SpaceToBatchNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToDepthDescriptordescriptor
 
const TensorInfo const SpaceToDepthDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptordescriptor
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStandInSupported(const std const TensorInfooutput
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptordescriptor
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfooutput0
 
const TensorInfo const TensorInfo const TensorInfooutput1
 
const TensorInfo const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeConvolution2dDescriptordescriptor
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeDescriptordescriptor
 
const TensorInfo const TransposeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > & hiddenStateOutput
 
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > & cellStateOutput
 
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ActivationDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 543 of file RefLayerSupport.cpp.

References armnn::Abs, armnn::BFloat16, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

Referenced by RefLayerSupport::IsLayerSupported().

547 {
548  bool supported = true;
549 
550  // Define supported types.
551  std::array<DataType,6> supportedTypes = {
558  };
559 
560  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
561  "Reference activation: input type not supported.");
562 
563  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
564  "Reference activation: output type not supported.");
565 
566  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
567  "Reference activation: input and output types mismatched.");
568 
569  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
570  "Reference activation: input and output shapes are of different rank.");
571 
572 
573  struct ActivationFunctionSupported : public Rule
574  {
575  ActivationFunctionSupported(const ActivationDescriptor& desc)
576  {
577  switch(desc.m_Function)
578  {
591  {
592  m_Res = true;
593  break;
594  }
595  default:
596  {
597  m_Res = false;
598  break;
599  }
600  }
601  }
602  };
603 
604  // Function is supported
605  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
606  "Reference activation: function not supported.");
607 
608  return supported;
609 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
min(a, max(b, input)) ReLu1 & ReLu6.
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 611 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported(), and TEST_SUITE().

615 {
616  bool supported = true;
617 
618  std::array<DataType,7> supportedTypes = {
626  };
627 
628  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
629  "Reference addition: input 0 is not a supported type.");
630 
631  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
632  "Reference addition: input 1 is not a supported type.");
633 
634  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
635  "Reference addition: output is not a supported type.");
636 
637  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
638  "Reference addition: input 0 and Input 1 types are mismatched");
639 
640  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
641  "Reference addition: input and output types are mismatched");
642 
643  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
644  "Reference addition: shapes are not suitable for implicit broadcast.");
645 
646  return supported;
647 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ArgMinMaxDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 649 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

652 {
654 
655  std::array<DataType, 8> supportedInputTypes =
656  {
665  };
666 
667  std::array<DataType,2> supportedOutputTypes = {
669  DataType::Signed64
670  };
671 
672  bool supported = true;
673 
674  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
675  "Reference ArgMinMax: input is not a supported type.");
676  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
677  "Reference ArgMinMax: output type not supported");
678 
679  return supported;
680 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  mean,
const TensorInfo &  var,
const TensorInfo &  beta,
const TensorInfo &  gamma,
const BatchNormalizationDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 682 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

690 {
692 
693  std::array<DataType, 6> supportedTypes =
694  {
701  };
702 
703  bool supported = true;
704 
705  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
706  "Reference batch normalization: input is not a supported type.");
707 
708  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
709  "Reference batch normalization: output is not a supported type.");
710 
711  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
712  "Reference batch normalization: input and output types are mismatched");
713 
714  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
715  "Reference batch normalization: mean is not a supported type.");
716 
717  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
718  "Reference batch normalization: variance is not a supported type.");
719 
720  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
721  "Reference batch normalization: beta is not a supported type.");
722 
723  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
724  "Reference batch normalization: gamma is not a supported type.");
725 
726  return supported;
727 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const TensorInfo & mean

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const BatchToSpaceNdDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 729 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

733 {
735 
736  bool supported = true;
737 
738  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
739  std::string inputTensorStr = "input";
740  std::string outputTensorStr = "output";
741 
742  // Define supported types.
743  std::array<DataType,6> supportedTypes =
744  {
751  };
752 
753  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
754  "Reference BatchToSpaceNd: input type not supported.");
755 
756  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
757  "Reference BatchToSpaceNd: output type not supported.");
758 
759  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
760  "Reference BatchToSpaceNd: input and output types mismatched.");
761 
762  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
764  CreateIncorrectDimensionsErrorMsg(4,
766  batchToSpaceNdLayerStr,
767  outputTensorStr).data());
768 
769  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
771  CreateIncorrectDimensionsErrorMsg(4,
772  input.GetNumDimensions(),
773  batchToSpaceNdLayerStr,
774  inputTensorStr).data());
775 
776  return supported;
777 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 779 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

782 {
783  std::array<DataType, 9> supportedInputTypes =
784  {
793  };
794 
795  bool supported = true;
796  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
797  "Reference cast: input is not a supported type");
798 
799 
800  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
801  "Reference cast: output is not a supported type");
802 
803  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
804  "Reference cast: input and output shapes have different number of total elements");
805 
806  return supported;
807 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo & input,
const TensorInfo & output,
const ChannelShuffleDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 809 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

813 {
815  bool supported = true;
816 
817  // Define supported output and inputs types.
818  std::array<DataType, 7> supportedTypes =
819  {
827  };
828 
829  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
830  "Reference ChannelShuffle: input is not a supported type.");
831 
832  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
833  "Reference ChannelShuffle: output is not a supported type.");
834 
835  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
836  "Reference ChannelShuffle: input and output types are mismatched.");
837 
838  return supported;
839 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const ComparisonDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 842 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

847 {
849  std::array<DataType, 8> supportedInputTypes =
850  {
859  };
860 
861  bool supported = true;
862  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
863  "Reference comparison: input 0 is not a supported type");
864 
865  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
866  "Reference comparison: input 0 and Input 1 types are mismatched");
867 
869  "Reference comparison: output is not of type Boolean");
870 
871  return supported;
872 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo & output,
const OriginsDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 874 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

878 {
880 
881  bool supported = true;
882  std::array<DataType,6> supportedTypes =
883  {
890  };
891 
892  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
893  "Reference concatenation: output type not supported");
894  for (const TensorInfo* input : inputs)
895  {
896  ARMNN_ASSERT(input != nullptr);
897  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
898  "Reference concatenation: input type not supported");
899 
900  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
901  "Reference concatenation: input and output types mismatched.");
902  }
903 
904  return supported;
905 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 907 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

909 {
910  std::array<DataType,8> supportedTypes =
911  {
920  };
921 
922  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
923  "Reference constant: output is not a supported type.");
924 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 926 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

Referenced by RefLayerSupport::IsLayerSupported().

929 {
930  bool supported = true;
931 
932  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
933  "Reference for ConvertBf16ToFp32 layer: input type not supported");
934 
936  "Reference for ConvertBf16ToFp32 layer: output type not supported");
937 
938  return supported;
939 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 941 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

944 {
946  input.GetDataType(),
947  &TrueFunc<>,
948  &FalseInputFuncF32<>,
949  &FalseFuncU8<>,
950  &FalseFuncI32<>,
951  &FalseFuncU8<>) &&
954  &FalseOutputFuncF16<>,
955  &TrueFunc<>,
956  &FalseFuncU8<>,
957  &FalseFuncI32<>,
958  &FalseFuncU8<>));
959 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo & output
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 961 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

Referenced by RefLayerSupport::IsLayerSupported().

964 {
965  bool supported = true;
966 
967  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
968  "Reference for ConvertFp32ToBf16 layer: input type not supported");
969 
971  "Reference for ConvertFp32ToBf16 layer: output type not supported");
972 
973  return supported;
974 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 976 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

979 {
981  input.GetDataType(),
982  &FalseInputFuncF16<>,
983  &TrueFunc<>,
984  &FalseFuncU8<>,
985  &FalseFuncI32<>,
986  &FalseFuncU8<>) &&
989  &TrueFunc<>,
990  &FalseOutputFuncF32<>,
991  &FalseFuncU8<>,
992  &FalseFuncI32<>,
993  &FalseFuncU8<>));
994 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo & output
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 996 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1002 {
1003  bool supported = true;
1004 
1005  // Define supported types.
1006  std::array<DataType,7> supportedTypes =
1007  {
1015  };
1016 
1017  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1018  "Reference Convolution2d: input is not a supported type.");
1019 
1020  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1021  "Reference Convolution2d: output is not a supported type.");
1022 
1023  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
1024  if (input.GetDataType() == DataType::BFloat16)
1025  {
1027  {
1028  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1029  supported = false;
1030  }
1031  }
1032  else
1033  {
1034  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1035  "Reference Convolution2d: input and output types mismatched.");
1036  }
1037 
1038  const DataType inputType = input.GetDataType();
1039  if (IsQuantized8BitType(inputType))
1040  {
1041  std::array<DataType, 3> supportedWeightTypes =
1042  {
1046  };
1047 
1048  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1049  "Reference Convolution2d: weights type not supported for quantized input.");
1050  }
1051  else
1052  {
1053  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1054  "Reference Convolution2d: weights is not a supported type.");
1055 
1056  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1057  "Reference Convolution2d: input and weights types mismatched.");
1058  }
1059 
1060  if (biases.has_value())
1061  {
1062  std::array<DataType,4> biasesSupportedTypes =
1063  {
1068  };
1069 
1070  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1071  "Reference Convolution2d: biases is not a supported type.");
1072  }
1074 
1075  return supported;
1076 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:35
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo & output
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution3dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1078 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1084 {
1085  bool supported = true;
1086 
1087  // Define supported types.
1088  std::array<DataType,7> supportedTypes =
1089  {
1097  };
1098 
1099  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1100  "Reference Convolution3d: input is not a supported type.");
1101 
1102  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1103  "Reference Convolution3d: output is not a supported type.");
1104 
1105  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1106  "Reference Convolution3d: input and output types mismatched.");
1107 
1108  const DataType inputType = input.GetDataType();
1109  if (IsQuantized8BitType(inputType))
1110  {
1111  std::array<DataType, 3> supportedWeightTypes =
1112  {
1116  };
1117 
1118  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1119  "Reference Convolution3d: weights type not supported for quantized input.");
1120  }
1121  else
1122  {
1123  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1124  "Reference Convolution3d: weights is not a supported type.");
1125 
1126  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1127  "Reference Convolution3d: input and weights types mismatched.");
1128  }
1129 
1130  if (biases.has_value())
1131  {
1132  std::array<DataType,4> biasesSupportedTypes =
1133  {
1138  };
1139 
1140  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1141  "Reference Convolution3d: biases is not a supported type.");
1142  }
1144 
1145  return supported;
1146 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:35
const TensorInfo & output
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1148 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1151 {
1152  bool supported = true;
1153 
1154  std::array<DataType, 8> supportedTypes =
1155  {
1164  };
1165 
1166  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1167  "Reference for Debug layer: input type not supported");
1168 
1169  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1170  "Reference for Debug layer: output type not supported");
1171 
1172  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1173  "Reference for Debug layer: input and output types are mismatched");
1174 
1175  return supported;
1176 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthToSpaceDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1178 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1182 {
1184  bool supported = true;
1185 
1186  std::array<DataType,6> supportedTypes =
1187  {
1194  };
1195 
1196  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1197  "Reference DepthToSpace: input type not supported");
1198 
1199  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1200  "Reference DepthToSpace: output type not supported");
1201 
1202  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1203  "Reference DepthToSpace: input and output types are mismatched");
1204 
1205  return supported;
1206 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1208 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().

1214 {
1216  bool supported = true;
1217 
1218  // Define supported types.
1219  std::array<DataType,7> supportedTypes =
1220  {
1228  };
1229 
1230  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1231  "Reference DepthwiseConvolution2d: input is not a supported type.");
1232 
1233  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1234  "Reference DepthwiseConvolution2d: output is not a supported type.");
1235 
1236  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1237  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1238 
1239  const DataType inputType = input.GetDataType();
1240  if (IsQuantized8BitType(inputType))
1241  {
1242  std::array<DataType, 3> supportedWeightTypes =
1243  {
1247  };
1248 
1249  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1250  "Reference DepthwiseConvolution2d: weights type not supported for "
1251  "quantized input.");
1252  }
1253  else
1254  {
1255  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1256  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1257 
1258  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1259  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1260  }
1261 
1262  if (biases.has_value())
1263  {
1264  std::array<DataType,4> biasesSupportedTypes =
1265  {
1270  };
1271  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1272  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1273  }
1274 
1275  return supported;
1276 
1277 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:35
const TensorInfo & output
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1279 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

1282 {
1283  bool supported = true;
1284 
1285  std::array<DataType,4> supportedInputTypes = {
1290  };
1291 
1292  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1293  "Reference for Dequantize layer: input type not supported.");
1294 
1295  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1296  "Reference for Dequantize layer: per-axis quantized input not supported.");
1297 
1298  std::array<DataType,3> supportedOutputTypes = {
1302  };
1303 
1304  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1305  "Reference for Dequantize layer: output type not supported.");
1306 
1307  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1308  "Reference for Dequantize layer: input/output shapes have different num total "
1309  "elements.");
1310 
1311  return supported;
1312 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo & boxEncodings,
const TensorInfo & scores,
const TensorInfo & anchors,
const TensorInfo & detectionBoxes,
const TensorInfo & detectionClasses,
const TensorInfo & detectionScores,
const TensorInfo & numDetections,
const DetectionPostProcessDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1314 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1323 {
1325 
1326  bool supported = true;
1327 
1328  std::array<DataType,6> supportedInputTypes =
1329  {
1336  };
1337 
1338  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1339  "Reference DetectionPostProcess: input 0 is not a supported type.");
1340 
1341  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1342  "Reference DetectionPostProcess: input 1 is not a supported type.");
1343 
1344  return supported;
1345 }
const TensorInfo const TensorInfo & anchors
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & scores
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1347 of file RefLayerSupport.cpp.

References RefLayerSupport::IsDepthwiseConvolutionSupported().

1353 {
1355 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo & output
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1357 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1361 {
1362  bool supported = true;
1363 
1364  std::array<DataType,7> supportedTypes = {
1372  };
1373 
1374  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1375  "Reference division: input 0 is not a supported type.");
1376 
1377  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1378  "Reference division: input 1 is not a supported type.");
1379 
1380  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1381  "Reference division: output is not a supported type.");
1382 
1383  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1384  "Reference division: input 0 and Input 1 types are mismatched");
1385 
1386  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1387  "Reference division: input and output types are mismatched");
1388 
1389  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1390  "Reference division: shapes are not suitable for implicit broadcast.");
1391 
1392  return supported;
1393 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ElementwiseUnaryDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1395 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1399 {
1401 
1402  std::array<DataType, 7> supportedTypes =
1403  {
1411  };
1412 
1413  std::array<DataType, 1> logicalSupportedTypes =
1414  {
1416  };
1417 
1418  bool supported = true;
1419 
1420  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1421  {
1422  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1423  "Reference elementwise unary: input type not supported");
1424 
1425  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1426  "Reference elementwise unary: output type not supported");
1427  }
1428  else
1429  {
1430  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1431  "Reference elementwise unary: input type not supported");
1432 
1433  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1434  "Reference elementwise unary: output type not supported");
1435  }
1436 
1437  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1438  "Reference elementwise unary: input and output types not matching");
1439 
1440  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1441  "Reference elementwise unary: input and output shapes"
1442  "have different number of total elements");
1443 
1444  return supported;
1445 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo &  input,
const FakeQuantizationDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1447 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1450 {
1452  bool supported = true;
1453 
1454  std::array<DataType,1> supportedTypes =
1455  {
1457  };
1458 
1459  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1460  "Reference fake quantization: input type not supported.");
1461 
1462  return supported;
1463 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const FillDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 1465 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1469 {
1472 
1473  bool supported = true;
1474 
1475  std::array<DataType,3> supportedTypes =
1476  {
1480  };
1481 
1482  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1483  "Reference Fill: input type not supported.");
1484 
1485  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1486  "Reference Fill: output type not supported.");
1487  return supported;
1488 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1490 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1493 {
1495  bool supported = true;
1496 
1497  std::array<DataType,3> supportedTypes =
1498  {
1502  };
1503 
1504  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1505  "Reference Floor: input type not supported.");
1506 
1507  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1508  "Reference Floor: output type not supported.");
1509 
1510  return supported;
1511 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  weights,
const TensorInfo &  biases,
const FullyConnectedDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1513 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1519 {
1520  bool supported = true;
1521 
1522  // Define supported types.
1523  std::array<DataType,6> supportedTypes =
1524  {
1531  };
1532 
1533  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1534  "Reference Fully Connected: input type not supported.");
1535 
1536  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1537  "Reference Fully Connected: output type not supported.");
1538 
1539  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1540  "Reference Fully Connected: weights type not supported.");
1541 
1542  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1543  if (input.GetDataType() == DataType::BFloat16)
1544  {
1546  {
1547  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1548  supported = false;
1549  }
1550  }
1551  else
1552  {
1553  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1554  "Reference Fully Connected: input and output types mismatched.");
1555  }
1556 
1557  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1558  "Reference Fully Connected: weights is not a supported type.");
1559 
1560  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1561  "Reference Fully Connected: input and weights types mismatched.");
1562 
1563  if (descriptor.m_BiasEnabled)
1564  {
1565  // Defined supported types for bias
1566  std::array<DataType, 5>
1567  supportedBiasTypes =
1568  {
1574  };
1575 
1576  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1577  "Reference Fully Connected: bias type not supported.");
1578 
1579  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1580  "Reference Fully Connected: bias and weight types mismatch.");
1581 
1582  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1583  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1584 
1585  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1586  "Reference Fully Connected: bias must have 1 dimension.");
1587 
1588  }
1589 
1590  return supported;
1591 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo & output
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const GatherDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1593 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, GatherDescriptor::m_Axis, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1598 {
1599  bool supported = true;
1600  std::array<DataType,7> supportedTypes =
1601  {
1609  };
1610 
1611  if (descriptor.m_Axis != 0)
1612  {
1613  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1614  supported &= false;
1615  }
1616  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1617  "Reference Gather: input type not supported");
1618 
1619  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1620  "Reference Gather: output type not supported");
1621 
1622  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1623  "Reference Gather: indices (input1) type not supported");
1624 
1625  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1626  "Reference Gather: input and output types not matching");
1627 
1628  return supported;
1629 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo &  input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1631 of file RefLayerSupport.cpp.

Referenced by RefLayerSupport::IsLayerSupported().

1633 {
1634  return true;
1635 }

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const InstanceNormalizationDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1637 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1641 {
1643  // Define supported types
1644  std::array<DataType, 3> supportedTypes =
1645  {
1649  };
1650 
1651  bool supported = true;
1652 
1653  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1654  "Reference Instance Normalization: input type not supported.");
1655 
1656  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1657  "Reference Instance Normalization: output type not supported.");
1658 
1659  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1660  "Reference Instance Normalization: input and output types mismatched.");
1661 
1662  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1663  "Reference Instance Normalization: input and output shapes have different "
1664  "num total elements.");
1665 
1666  return supported;
1667 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const L2NormalizationDescriptor &  descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1669 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1673 {
1675  // Define supported types
1676  std::array<DataType, 6> supportedTypes =
1677  {
1684  };
1685 
1686  bool supported = true;
1687 
1688  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1689  "Reference L2normalization: input type not supported.");
1690 
1691  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1692  "Reference L2normalization: output type not supported.");
1693 
1694  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1695  "Reference L2normalization: input and output types mismatched.");
1696 
1697  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1698  "Reference L2normalization: input and output shapes have different "
1699  "num total elements.");
1700 
1701  return supported;
1702 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType &  type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor &  descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmInputParamsInfo,
Optional< std::string &>  reasonIfUnsupported 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, ILayerSupport::descriptor, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertBf16ToFp32Supported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToBf16Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherSupported(), RefLayerSupport::IsInputSupported(), RefLayerSupport::IsInstanceNormalizationSupported(), 
RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, ILayerSupport::reasonIfUnsupported, armnn::Reduce, armnn::Reshape, armnn::Resize, 
armnn::Shape, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
83  return IsBatchNormalizationSupported(infos[0],
84  infos[1],
85  infos[2],
86  infos[3],
87  infos[4],
88  infos[5],
89  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
90  (&descriptor)),
93  return IsBatchToSpaceNdSupported(infos[0],
94  infos[1],
95  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
98  return IsComparisonSupported(infos[0],
99  infos[1],
100  infos[2],
101  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
103  case LayerType::Concat:
104  {
105  std::vector<const TensorInfo*> inputInfos;
106  for (uint32_t i = 0; i < (infos.size() - 1); i++)
107  {
108  inputInfos.push_back(&infos[i]);
109  }
110  return IsConcatSupported(inputInfos,
111  infos[infos.size() - 1],
112  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
114  }
115  case LayerType::Constant:
116  return IsConstantSupported(infos[0], reasonIfUnsupported);
118  return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
120  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
122  return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
124  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
126  {
127  if (infos.size() != 4)
128  {
129  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
130  "TensorInfos should be of format: {input, output, weights, biases}.");
131  }
132 
133  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
134  if (infos[3] == TensorInfo())
135  {
136  return IsConvolution2dSupported(infos[0],
137  infos[1],
138  desc,
139  infos[2],
140  EmptyOptional(),
142  }
143  else
144  {
145  return IsConvolution2dSupported(infos[0],
146  infos[1],
147  desc,
148  infos[2],
149  infos[3],
151  }
152  }
154  return IsDepthToSpaceSupported(infos[0],
155  infos[1],
156  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
159  {
160  if (infos.size() != 4)
161  {
162  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
163  "TensorInfos should be of format: {input, output, weights, biases}.");
164  }
165 
166  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
167  if (infos[3] == TensorInfo())
168  {
169  return IsDepthwiseConvolutionSupported(infos[0],
170  infos[1],
171  desc,
172  infos[2],
173  EmptyOptional(),
175  }
176  else
177  {
178  return IsDepthwiseConvolutionSupported(infos[0],
179  infos[1],
180  desc,
181  infos[2],
182  infos[3],
184  }
185  }
187  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
188  case LayerType::Division:
189  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
191  return IsElementwiseUnarySupported(infos[0],
192  infos[1],
193  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
195  case LayerType::Fill:
196  return IsFillSupported(infos[0],
197  infos[1],
198  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
200  case LayerType::Floor:
201  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
203  return IsFullyConnectedSupported(infos[0],
204  infos[1],
205  infos[2],
206  infos[3],
207  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
209  case LayerType::Gather:
210  return IsGatherSupported(infos[0],
211  infos[1],
212  infos[2],
213  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
215  case LayerType::Input:
216  return IsInputSupported(infos[0], reasonIfUnsupported);
218  return IsInstanceNormalizationSupported(infos[0],
219  infos[1],
220  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
221  (&descriptor)),
224  return IsL2NormalizationSupported(infos[0],
225  infos[1],
226  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
229  return IsLogicalBinarySupported(infos[0],
230  infos[1],
231  infos[2],
232  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
235  return IsLogSoftmaxSupported(infos[0],
236  infos[1],
237  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
239  case LayerType::Lstm:
240  return IsLstmSupported(infos[0],
241  infos[1],
242  infos[2],
243  infos[3],
244  infos[4],
245  infos[5],
246  infos[6],
247  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
248  lstmParamsInfo.value(),
250  case LayerType::QLstm:
251  return IsQLstmSupported(infos[0],
252  infos[1],
253  infos[2],
254  infos[3],
255  infos[4],
256  infos[5],
257  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
258  lstmParamsInfo.value(),
260  case LayerType::Maximum:
261  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
262  case LayerType::Mean:
263  return IsMeanSupported(infos[0],
264  infos[1],
265  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
267  case LayerType::Minimum:
268  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
270  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
272  return IsNormalizationSupported(infos[0],
273  infos[1],
274  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
276  case LayerType::Output:
277  return IsOutputSupported(infos[0], reasonIfUnsupported);
278  case LayerType::Pad:
279  return IsPadSupported(infos[0],
280  infos[1],
281  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
283  case LayerType::Permute:
284  return IsPermuteSupported(infos[0],
285  infos[1],
286  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
289  return IsPooling2dSupported(infos[0],
290  infos[1],
291  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
293  case LayerType::Prelu:
294  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
295  case LayerType::Quantize:
296  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
297  case LayerType::Reshape:
298  return IsReshapeSupported(infos[0],
299  infos[1],
300  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
302  case LayerType::Resize:
303  return IsResizeSupported(infos[0],
304  infos[1],
305  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
307  case LayerType::Reduce:
308  return IsReduceSupported(infos[0],
309  infos[1],
310  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
312  case LayerType::Slice:
313  return IsSliceSupported(infos[0],
314  infos[1],
315  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
317  case LayerType::Softmax:
318  return IsSoftmaxSupported(infos[0],
319  infos[1],
320  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
323  return IsSpaceToBatchNdSupported(infos[0],
324  infos[1],
325  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
328  return IsSpaceToDepthSupported(infos[0],
329  infos[1],
330  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
332  case LayerType::Splitter:
333  {
334  std::vector<TensorInfo> outputInfos;
335  for (uint32_t i = 1; i < infos.size(); i++)
336  {
337  outputInfos.push_back(infos[i]);
338  }
339  return IsSplitterSupported(infos[0],
340  {outputInfos.begin(), outputInfos.end()},
341  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
343  }
344  case LayerType::Stack:
345  {
346  std::vector<const TensorInfo*> inputInfos;
347  for (uint32_t i = 0; i < infos.size() - 1; i++)
348  {
349  inputInfos.push_back(&infos[i]);
350  }
351  return IsStackSupported(inputInfos,
352  infos[infos.size() - 1],
353  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
355  }
357  return IsStridedSliceSupported(infos[0],
358  infos[1],
359  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
362  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
364  return IsTransposeSupported(infos[0],
365  infos[1],
366  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
369  {
370  if (infos.size() != 4)
371  {
372  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
373  "TensorInfos should be of format: {input, output, weights, biases}.");
374  }
375 
376  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
377  if (infos[3] == TensorInfo())
378  {
379  return IsTransposeConvolution2dSupported(infos[0],
380  infos[1],
381  desc,
382  infos[2],
383  EmptyOptional(),
385  }
386  else
387  {
388  return IsTransposeConvolution2dSupported(infos[0],
389  infos[1],
390  desc,
391  infos[2],
392  infos[3],
394  }
395  }
396  case LayerType::Cast:
397  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
399  return IsChannelShuffleSupported(infos[0],
400  infos[1],
401  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
404  {
405  if (infos.size() != 4)
406  {
407  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
408  "TensorInfos should be of format: {input, output, weights, biases}.");
409  }
410 
411  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
412  if (infos[3] == TensorInfo())
413  {
414  return IsConvolution3dSupported(infos[0],
415  infos[1],
416  desc,
417  infos[2],
418  EmptyOptional(),
420  }
421  else
422  {
423  return IsConvolution3dSupported(infos[0],
424  infos[1],
425  desc,
426  infos[2],
427  infos[3],
429  }
430  }
431  case LayerType::Debug:
432  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
434  return IsDetectionPostProcessSupported(infos[0],
435  infos[1],
436  infos[2],
437  infos[3],
438  infos[4],
439  infos[5],
440  infos[6],
441  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
442  (&descriptor)),
445  return IsFakeQuantizationSupported(infos[0],
446  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
448  case LayerType::MemCopy:
449  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
450  case LayerType::Rank:
451  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
452  case LayerType::Shape:
453  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
455  {
456  if (infos.size() != 6)
457  {
458  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
459  "should be of format: {input, outputStateIn, cellStateIn, "
460  "hiddenStateOutputVal, cellStateOutputVal, output}");
461  }
462  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
463 
464  bool isHiddenStateOutputOptional = (infos[4] == TensorInfo());
465  bool isCellStateOutput = (infos[5] == TensorInfo());
466  if (isHiddenStateOutputOptional && isCellStateOutput)
467  {
469  infos[1],
470  infos[2],
471  infos[3],
472  EmptyOptional(),
473  EmptyOptional(),
474  desc,
475  lstmParamsInfo.value(),
477  }
478  else if (isHiddenStateOutputOptional)
479  {
481  infos[1],
482  infos[2],
483  infos[3],
484  EmptyOptional(),
485  infos[5],
486  desc,
487  lstmParamsInfo.value(),
489  }
490  else if (isCellStateOutput)
491  {
493  infos[1],
494  infos[2],
495  infos[3],
496  infos[4],
497  EmptyOptional(),
498  desc,
499  lstmParamsInfo.value(),
501  }
502  else
503  {
505  infos[1],
506  infos[2],
507  infos[3],
508  infos[4],
509  infos[5],
510  desc,
511  lstmParamsInfo.value(),
513  }
514  }
516  return IsPooling3dSupported(infos[0],
517  infos[1],
518  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
520  case LayerType::Map:
521  return true;
522  case LayerType::Unmap:
523  return true;
526  case LayerType::Merge:
527  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
530  infos[1],
531  infos[2],
532  infos[3],
533  infos[4],
534  quantizedLstmInputParamsInfo.value(),
536  default:
537  // layers not supported in neon by default:
538  // precompiled, standin, switch
539  return false;
540  }
541 }
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const ActivationDescriptor & descriptor
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &output, const Optional< TensorInfo > &hiddenStateOutput, const Optional< TensorInfo > &cellStateOutput, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1704 of file RefLayerSupport.cpp.

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1709 {
1710  IgnoreUnused(descriptor);
1711 
1712  std::array<DataType, 1> supportedTypes =
1713  {
1714  DataType::Boolean
1715  };
1716 
1717  bool supported = true;
1718  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1719  "Reference LogicalBinary: input 0 type not supported");
1720  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1721  "Reference LogicalBinary: input 1 type not supported");
1722 
1723  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1724  "Reference LogicalBinary: input and output types do not match");
1725 
1726  return supported;
1727 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1729 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1733 {
1734  IgnoreUnused(descriptor);
1735 
1736  std::array<DataType, 3> supportedTypes =
1737  {
1738  DataType::BFloat16,
1739  DataType::Float16,
1740  DataType::Float32
1741  };
1742 
1743  bool supported = true;
1744  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1745  "Reference LogSoftmax: input type not supported");
1746 
1747  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1748  "Reference LogSoftmax: output type not supported");
1749 
1750  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1751  "Reference LogSoftmax: input and output types do not match");
1752 
1753  return supported;
1754 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1756 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1766 {
1767  IgnoreUnused(descriptor);
1768  IgnoreUnused(paramsInfo);
1769 
1770  bool supported = true;
1771 
1772  std::array<DataType,3> supportedTypes = {
1773  DataType::BFloat16,
1774  DataType::Float32,
1775  DataType::QSymmS16
1776  };
1777 
1778  // check inputs and outputs
1779  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1780  "Reference Lstm: input is not a supported type.");
1781  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1782  "Reference Lstm: input and outputStateIn types are mismatched");
1783  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1784  "Reference Lstm: input and cellStateIn types are mismatched");
1785  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1786  "Reference Lstm: input and scratchBuffer types are mismatched");
1787  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1788  "Reference Lstm: input and outputStateOut types are mismatched");
1789  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1790  "Reference Lstm: input and cellStateOut types are mismatched");
1791 
1792  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1793  "Reference Lstm: input and output types are mismatched");
1794  // check layer parameters
1795  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1796  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1797  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1798  "Reference Lstm: input and InputToCellWeights types are mismatched");
1799  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1800  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1801  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1802  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1803  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1804  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1805  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1806  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1807  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1808  "Reference Lstm: input and ForgetGateBias types are mismatched");
1809  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1810  "Reference Lstm: input and CellBias types are mismatched");
1811  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1812  "Reference Lstm: input and OutputGateBias types are mismatched");
1813  if (!descriptor.m_CifgEnabled)
1814  {
1815  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1816  "Reference Lstm: input and InputToInputWeights types are mismatched");
1817  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1818  reasonIfUnsupported,
1819  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1820  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1821  "Reference Lstm: input and InputGateBias types are mismatched");
1822  if (descriptor.m_PeepholeEnabled)
1823  {
1824  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1825  reasonIfUnsupported,
1826  "Reference Lstm: input and CellToInputWeights types are mismatched");
1827  }
1828  }
1829  if (descriptor.m_PeepholeEnabled)
1830  {
1831  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1832  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1833  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1834  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1835  }
1836  if (descriptor.m_ProjectionEnabled)
1837  {
1838  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1839  "Reference Lstm: input and mProjectionWeights types are mismatched");
1840  if (paramsInfo.m_ProjectionBias != nullptr)
1841  {
1842  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1843  "Reference Lstm: input and ProjectionBias types are mismatched");
1844  }
1845  }
1846  if (descriptor.m_LayerNormEnabled)
1847  {
1848  if (!descriptor.m_CifgEnabled)
1849  {
1850  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1851  reasonIfUnsupported,
1852  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1853  }
1854  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1855  reasonIfUnsupported,
1856  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1857  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1858  reasonIfUnsupported,
1859  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1860  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1861  reasonIfUnsupported,
1862  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1863  }
1864 
1865  return supported;
1866 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo & output
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1868 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1872 {
1873  bool supported = true;
1874 
1875  std::array<DataType,7> supportedTypes = {
1876  DataType::BFloat16,
1877  DataType::Float16,
1878  DataType::Float32,
1879  DataType::QAsymmS8,
1880  DataType::QAsymmU8,
1881  DataType::QSymmS16,
1882  DataType::Signed32
1883  };
1884 
1885  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1886  "Reference maximum: input 0 is not a supported type.");
1887 
1888  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1889  "Reference maximum: input 1 is not a supported type.");
1890 
1891  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1892  "Reference maximum: output is not a supported type.");
1893 
1894  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1895  "Reference maximum: input 0 and Input 1 types are mismatched");
1896 
1897  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1898  "Reference maximum: input and output types are mismatched");
1899 
1900  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1901  "Reference maximum: shapes are not suitable for implicit broadcast.");
1902 
1903  return supported;
1904 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1906 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::numeric_cast(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1910 {
1911  bool supported = true;
1912  std::string meanLayerStr = "Mean";
1913  std::string outputTensorStr = "output";
1914 
1915  std::array<DataType,6> supportedTypes =
1916  {
1917  DataType::BFloat16,
1918  DataType::Float16,
1919  DataType::Float32,
1920  DataType::QAsymmS8,
1921  DataType::QAsymmU8,
1922  DataType::QSymmS16
1923  };
1924 
1925  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1926  "Reference Mean: input type not supported.");
1927 
1928  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1929  "Reference Mean: input and output types are mismatched");
1930 
1931  if (descriptor.m_KeepDims)
1932  {
1933  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1934  reasonIfUnsupported,
1935  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1936  output.GetNumDimensions(),
1937  meanLayerStr, outputTensorStr).data());
1938  }
1939  else if (descriptor.m_Axis.empty())
1940  {
1941  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1942  reasonIfUnsupported,
1943  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1944  meanLayerStr, outputTensorStr).data());
1945  }
1946  else
1947  {
1948  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1949 
1950  if (outputDim > 0)
1951  {
1952  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1953  reasonIfUnsupported,
1954  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1955  meanLayerStr, outputTensorStr).data());
1956  }
1957  else
1958  {
1959  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1960  reasonIfUnsupported,
1961  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1962  meanLayerStr, outputTensorStr).data());
1963  }
1964  }
1965 
1966  return supported;
1967 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1969 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1972 {
1973  bool supported = true;
1974 
1975  std::array<DataType,7> supportedTypes =
1976  {
1977  DataType::BFloat16,
1978  DataType::Float16,
1979  DataType::Float32,
1980  DataType::QAsymmS8,
1981  DataType::QAsymmU8,
1982  DataType::QSymmS16,
1983  DataType::Boolean
1984  };
1985 
1986  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1987  "Reference MemCopy: input type not supported");
1988 
1989  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1990  "Reference MemCopy: output type not supported");
1991 
1992  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1993  "Reference MemCopy: input and output types are mismatched");
1994 
1995  return supported;
1996 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1998 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2002 {
2003  bool supported = true;
2004 
2005  std::array<DataType,7> supportedTypes = {
2006  DataType::BFloat16,
2007  DataType::Float16,
2008  DataType::Float32,
2009  DataType::QAsymmS8,
2010  DataType::QAsymmU8,
2011  DataType::QSymmS16,
2012  DataType::Signed32
2013  };
2014 
2015  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2016  "Reference minimum: input 0 is not a supported type.");
2017 
2018  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2019  "Reference minimum: input 1 is not a supported type.");
2020 
2021  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2022  "Reference minimum: output is not a supported type.");
2023 
2024  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2025  "Reference minimum: input 0 and Input 1 types are mismatched");
2026 
2027  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2028  "Reference minimum: input and output types are mismatched");
2029 
2030  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2031  "Reference minimum: shapes are not suitable for implicit broadcast.");
2032 
2033  return supported;
2034 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2036 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2040 {
2041  bool supported = true;
2042 
2043  std::array<DataType,7> supportedTypes = {
2051  };
2052 
2053  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2054  "Reference multiplication: input 0 is not a supported type.");
2055 
2056  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2057  "Reference multiplication: input 1 is not a supported type.");
2058 
2059  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2060  "Reference multiplication: output is not a supported type.");
2061 
2062  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2063  "Reference multiplication: input 0 and Input 1 types are mismatched");
2064 
2065  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2066  "Reference multiplication: input and output types are mismatched");
2067 
2068  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2069  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2070 
2071  return supported;
2072 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2074 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2078 {
2080 
2081  // Define supported types
2082  std::array<DataType, 6> supportedTypes =
2083  {
2090  };
2091 
2092  bool supported = true;
2093 
2094  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2095  "Reference normalization: input type not supported.");
2096 
2097  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2098  "Reference normalization: output type not supported.");
2099 
2100  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2101  "Reference normalization: input and output shapes have different "
2102  "num total elements.");
2103 
2104  return supported;
2105 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2107 of file RefLayerSupport.cpp.

Referenced by RefLayerSupport::IsLayerSupported().

2109 {
2110  return true;
2111 }

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2113 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2117 {
2119  bool supported = true;
2120 
2121  // Define supported output and inputs types.
2122  std::array<DataType,6> supportedTypes =
2123  {
2130  };
2131 
2132  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2133  "Reference pad: input is not a supported type.");
2134 
2135  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2136  "Reference pad: output is not a supported type.");
2137 
2138  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2139  "Reference pad: input and output types are mismatched.");
2140 
2141  return supported;
2142 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2144 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2148 {
2150  bool supported = true;
2151 
2152  // Define supported output and inputs types.
2153  std::array<DataType, 6> supportedTypes =
2154  {
2161  };
2162 
2163  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2164  "Reference permute: input is not a supported type.");
2165 
2166  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2167  "Reference permute: output is not a supported type.");
2168 
2169  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2170  "Reference permute: input and output types are mismatched.");
2171 
2172  return supported;
2173 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2175 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2179 {
2181  bool supported = true;
2182 
2183  // Define supported output and inputs types.
2184  std::array<DataType,6> supportedTypes =
2185  {
2192  };
2193 
2194  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2195  "Reference pooling2d: input is not a supported type.");
2196 
2197  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2198  "Reference pooling2d: output is not a supported type.");
2199 
2200  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2201  "Reference pooling2d: input and output types are mismatched.");
2202 
2203  return supported;
2204 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling3dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2206 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2210 {
2212  bool supported = true;
2213 
2214  // Define supported output and inputs types.
2215  std::array<DataType,6> supportedTypes =
2216  {
2223  };
2224 
2225  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2226  "Reference pooling3d: input is not a supported type.");
2227 
2228  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2229  "Reference pooling3d: output is not a supported type.");
2230 
2231  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2232  "Reference pooling3d: input and output types are mismatched.");
2233 
2234  return supported;
2235 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2665 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2669 {
2670  bool supported = true;
2671 
2672  std::array<DataType, 6> supportedTypes
2673  {
2680  };
2681 
2682  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2683  "PReLU: input is not a supported type.");
2684 
2685  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2686  "PReLU: alpha is not a supported type.");
2687 
2688  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2689  "PReLU: output is not a supported type.");
2690 
2691  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2692  "PReLU: input, alpha and output types are mismatched");
2693 
2694  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2695  "PReLU: shapes are not suitable for implicit broadcast");
2696 
2697  return supported;
2698 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & alpha
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2238 of file RefLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

2247 {
2248  IgnoreUnused(input);
2256 
2258 
2259  return true;
2260 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo & previousCellStateIn
const TensorInfo & previousOutputIn
const TensorInfo & output
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2262 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

2265 {
2266  bool supported = true;
2267 
2268  // Define supported input types.
2269  std::array<DataType,7> supportedInputTypes = {
2277  };
2278 
2279  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2280  "Reference quantize: input type not supported.");
2281 
2282  // Define supported output types.
2283  std::array<DataType,4> supportedOutputTypes = {
2287  DataType::QSymmS16
2288  };
2289  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2290  "Reference quantize: output type not supported.");
2291 
2292  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2293  "Reference quantize: input and output shapes have different num total elements.");
2294 
2295  return supported;
2296 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2298 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2301 {
2302  IgnoreUnused(input);
2303  // Define supported output types.
2304  std::array<DataType,1> supportedOutputTypes =
2305  {
2307  };
2308 
2309  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2310  "Reference rank: output type not supported.");
2311 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo input,
const TensorInfo output,
const ReduceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2313 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2317 {
2319  bool supported = true;
2320  std::array<DataType,7> supportedTypes =
2321  {
2329  };
2330 
2331  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2332  "Reference Reduce: input type not supported");
2333 
2334  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2335  "Reference Reduce: output type not supported");
2336 
2337  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2338  "Reference Reduce: input and output types not matching");
2339 
2340  return supported;
2341 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo input,
const TensorInfo output,
const ReshapeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2343 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2347 {
2350  // Define supported output types.
2351  std::array<DataType,8> supportedOutputTypes =
2352  {
2361  };
2362 
2363  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2364  "Reference reshape: input type not supported.");
2365 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo input,
const TensorInfo output,
const ResizeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2367 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2371 {
2373  bool supported = true;
2374  std::array<DataType,6> supportedTypes =
2375  {
2382  };
2383 
2384  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2385  "Reference Resize: input type not supported");
2386 
2387  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2388  "Reference Resize: output type not supported");
2389 
2390  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2391  "Reference Resize: input and output types not matching");
2392 
2393  return supported;
2394 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2396 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2399 {
2400  IgnoreUnused(input);
2401  bool supported = true;
2402 
2403  std::array<DataType, 1> supportedTypes =
2404  {
2406  };
2407 
2408  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2409  "Reference Shape: output type not supported");
2410 
2411  return supported;
2412 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo input,
const TensorInfo output,
const SliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2414 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2418 {
2420  bool supported = true;
2421 
2422  std::array<DataType, 5> supportedTypes =
2423  {
2429  };
2430 
2431  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2432  "Reference Slice: input type not supported");
2433 
2434  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2435  "Reference Slice: output type not supported");
2436 
2437  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2438  "Reference Slice: input and output types are mismatched");
2439 
2440  return supported;
2441 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const SoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2443 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

2447 {
2449  bool supported = true;
2450  std::array<DataType,7> supportedTypes =
2451  {
2459  };
2460 
2461  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2462  "Reference Softmax: input type not supported");
2463 
2464  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2465  "Reference Softmax: output type not supported");
2466 
2467  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2468  "Reference Softmax: input and output types are mismatched");
2469 
2470  return supported;
2471 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToBatchNdDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2473 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2477 {
2479  bool supported = true;
2480  std::array<DataType,6> supportedTypes =
2481  {
2488  };
2489 
2490  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2491  "Reference SpaceToBatchNd: input type not supported");
2492 
2493  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2494  "Reference SpaceToBatchNd: output type not supported");
2495 
2496  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2497  "Reference SpaceToBatchNd: input and output types are mismatched");
2498 
2499  return supported;
2500 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToDepthDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2502 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2506 {
2507 
2509  bool supported = true;
2510 
2511  std::array<DataType,6> supportedTypes =
2512  {
2519  };
2520 
2521  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2522  "Reference SpaceToDepth: input type not supported");
2523 
2524  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2525  "Reference SpaceToDepth: output type not supported");
2526 
2527  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2528  "Reference SpaceToDepth: input and output types are mismatched");
2529 
2530  return supported;
2531 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2533 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputs, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2537 {
2539  bool supported = true;
2540  std::array<DataType,6> supportedTypes =
2541  {
2548  };
2549 
2550  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2551  "Reference splitter: input type not supported");
2552  for (const TensorInfo& output : outputs)
2553  {
2554  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2555  "Reference splitter: input type not supported");
2556 
2557  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2558  "Reference splitter: input and output types mismatched.");
2559  }
2560 
2561  return supported;
2562 }
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo output,
const StackDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2564 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2568 {
2570 
2571  bool supported = true;
2572  std::array<DataType,7> supportedTypes =
2573  {
2581  };
2582 
2583  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2584  "Reference stack: output type not supported");
2585  for (const TensorInfo* input : inputs)
2586  {
2587  ARMNN_ASSERT(input != nullptr);
2588  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2589  "Reference stack: input type not supported");
2590 
2591  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2592  "Reference stack: input and output types mismatched.");
2593  }
2594 
2595  return supported;
2596 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo input,
const TensorInfo output,
const StridedSliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2598 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2602 {
2604  bool supported = true;
2605 
2606  std::array<DataType,5> supportedTypes =
2607  {
2613  };
2614 
2615  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2616  "Reference StridedSlice: input type not supported");
2617 
2618  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2619  "Reference StridedSlice: output type not supported");
2620 
2621  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2622  "Reference StridedSlice: input and output types are mismatched");
2623 
2624  return supported;
2625 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2627 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2631 {
2632  bool supported = true;
2633 
2634  std::array<DataType,7> supportedTypes = {
2642  };
2643 
2644  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2645  "Reference subtraction: input 0 is not a supported type.");
2646 
2647  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2648  "Reference subtraction: input 1 is not a supported type.");
2649 
2650  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2651  "Reference subtraction: output is not a supported type.");
2652 
2653  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2654  "Reference subtraction: input 0 and Input 1 types are mismatched");
2655 
2656  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2657  "Reference subtraction: input and output types are mismatched");
2658 
2659  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2660  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2661 
2662  return supported;
2663 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & output
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2700 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

2706 {
2708  bool supported = true;
2709 
2710  std::array<DataType,7> supportedTypes =
2711  {
2719  };
2720 
2721  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2722  "Reference TransposeConvolution2d: input is not a supported type.");
2723 
2724  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2725  "Reference TransposeConvolution2d: output is not a supported type.");
2726 
2727  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2728  "Reference TransposeConvolution2d: input and output types mismatched.");
2729 
2730 
2731  const DataType inputType = input.GetDataType();
2732  if (IsQuantized8BitType(inputType))
2733  {
2734  std::array<DataType, 3> supportedWeightTypes =
2735  {
2739  };
2740 
2741  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2742  "Reference TransposeConvolution2d: weights type not supported for "
2743  "quantized input.");
2744  }
2745  else
2746  {
2747  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2748  "Reference TransposeConvolution2d: weights is not a supported type.");
2749 
2750  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2751  "Reference TransposeConvolution2d: input and weights types mismatched.");
2752  }
2753 
2754  if (biases.has_value())
2755  {
2756  std::array<DataType,4> biasesSupportedTypes =
2757  {
2762  };
2763  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2764  "Reference TransposeConvolution2d: biases is not a supported type.");
2765  }
2766 
2767  return supported;
2768 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:35
const TensorInfo & output
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo & input,
const TensorInfo & output,
const TransposeDescriptor & descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2770 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2774 {
2776  bool supported = true;
2777 
2778  // Define supported output and inputs types.
2779  std::array<DataType, 6> supportedTypes =
2780  {
2787  };
2788 
2789  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2790  "Reference transpose: input is not a supported type.");
2791 
2792  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2793  "Reference transpose: output is not a supported type.");
2794 
2795  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2796  "Reference transpose: input and output types are mismatched.");
2797 
2798  return supported;
2799 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo & input,
const TensorInfo & outputStateIn,
const TensorInfo & cellStateIn,
const TensorInfo & output,
const Optional< TensorInfo > &  hiddenStateOutput,
const Optional< TensorInfo > &  cellStateOutput,
const UnidirectionalSequenceLstmDescriptor & descriptor,
const LstmInputParamsInfo & paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2801 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), OptionalBase::has_value(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, armnn::QAsymmS8, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

2811 {
2816  bool supported = true;
2817 
2818  if (hiddenStateOutput.has_value() || cellStateOutput.has_value())
2819  {
2820  reasonIfUnsupported.value() += "Reference UnidirectionalSequenceLstm: hidden state output "
2821  "and cell state output are not supported at the moment.";
2822  }
2823 
2824  std::array<DataType, 1> supportedTypes =
2825  {
2827  };
2828 
2829  std::array<DataType, 2> supportedWeightTypes =
2830  {
2833  };
2834 
2835  // check inputs and outputs
2836  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2837  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2838  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
2839  "Reference UnidirectionalSequenceLstm: input and outputStateIn types are mismatched");
2840  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
2841  "Reference UnidirectionalSequenceLstm: input and cellStateIn types are mismatched");
2842 
2843  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2844  "Reference UnidirectionalSequenceLstm: input and output types are mismatched");
2845  // check layer parameters
2846  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2848  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2849  "is not a supported type.");
2850  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2852  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2853  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2855  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2856  "is not a supported type.");
2857  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2859  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2860  "is not a supported type.");
2861  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2863  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2864  "is not a supported type.");
2865  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2867  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2868  "is not a supported type.");
2869  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
2870  "Reference UnidirectionalSequenceLstm: input and ForgetGateBias types "
2871  "are mismatched");
2872  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
2873  "Reference UnidirectionalSequenceLstm: input and CellBias types are mismatched");
2874  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
2875  "Reference UnidirectionalSequenceLstm: input and OutputGateBias types "
2876  "are mismatched");
2877  if (!descriptor.m_CifgEnabled)
2878  {
2879  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2881  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2882  "is not a supported type.");
2883  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2885  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2886  "is not a supported type.");
2887  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
2888  "Reference UnidirectionalSequenceLstm: input and InputGateBias types "
2889  "are mismatched");
2890  if (descriptor.m_PeepholeEnabled)
2891  {
2892  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2894  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2895  "is not a supported type.");
2896  }
2897  }
2898  if (descriptor.m_PeepholeEnabled)
2899  {
2900  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2902  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2903  "is not a supported type.");
2904  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2906  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2907  "is not a supported type.");
2908  }
2909  if (descriptor.m_ProjectionEnabled)
2910  {
2911  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2913  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2914  "is not a supported type.");
2915  if (paramsInfo.m_ProjectionBias != nullptr)
2916  {
2917  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2918  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2919  "are mismatched");
2920  }
2921  }
2922  if (descriptor.m_LayerNormEnabled)
2923  {
2924  if (!descriptor.m_CifgEnabled)
2925  {
2926  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2928  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2929  "is not a supported type.");
2930  }
2931  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2933  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2934  "is not a supported type.");
2935  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2937  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2938  "is not a supported type.");
2939  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2941  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2942  "is not a supported type.");
2943  }
2944 
2945  return supported;
2946 }
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > & hiddenStateOutput
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo const TensorInfo const TensorInfo const Optional< TensorInfo > const Optional< TensorInfo > & cellStateOutput
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo & output
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

The documentation for this class was generated from the following files: