ArmNN 22.05 — RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
RefLayerSupport derives from LayerSupportBase, which in turn derives from ILayerSupport.

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertBf16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToBf16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo *> &inputs, const std::vector< const TensorInfo *> &outputs, const StandInDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from ILayerSupport
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsActivationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsAdditionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsArgMinMaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsBatchNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsBatchToSpaceNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsCastSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsChannelShuffleSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsComparisonSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertBf16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertFp32ToBf16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertFp16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvertFp32ToFp16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConvolution3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDebugSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDepthToSpaceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDequantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsDivisionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsElementwiseUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFakeQuantizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFillSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFloorSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsFullyConnectedSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsGatherSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsInputSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsInstanceNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsL2NormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLogicalBinarySupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLogicalUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLogSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMaximumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMeanSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMemCopySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMemImportSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMergeSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMinimumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsMultiplicationSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsOutputSupported(const TensorInfo &output
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPadSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPermuteSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPooling2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPooling3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPreCompiledSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsPreluSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsQuantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsQLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsQuantizedLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsRankSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsReduceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsReshapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsResizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsShapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSliceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSpaceToBatchNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSpaceToDepthSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSplitterSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSubtractionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsSwitchSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsTransposeConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsTransposeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input
 

Additional Inherited Members

- Public Attributes inherited from ILayerSupport
const TensorInfooutput
 
const TensorInfo const ActivationDescriptordescriptor
 
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoinput1
 
const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ArgMinMaxDescriptordescriptor
 
const TensorInfo const ArgMinMaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfomean
 
const TensorInfo const TensorInfo const TensorInfovar
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfobeta
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfogamma
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const BatchToSpaceNdDescriptordescriptor
 
const TensorInfo const BatchToSpaceNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ChannelShuffleDescriptordescriptor
 
const TensorInfo const ChannelShuffleDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const ComparisonDescriptordescriptor
 
const TensorInfo const TensorInfo const ComparisonDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsConcatSupported(const std Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution2dDescriptordescriptor
 
const TensorInfo const Convolution2dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution3dDescriptordescriptor
 
const TensorInfo const Convolution3dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthToSpaceDescriptordescriptor
 
const TensorInfo const DepthToSpaceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthwiseConvolution2dDescriptordescriptor
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoscores
 
const TensorInfo const TensorInfoanchors
 
const TensorInfo const TensorInfo const TensorInfodetectionBoxes
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionClasses
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionScores
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfonumDetections
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const =0
 
const TensorInfo const ElementwiseUnaryDescriptordescriptor
 
const TensorInfo const ElementwiseUnaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const FakeQuantizationDescriptordescriptor
 
const FakeQuantizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const FillDescriptordescriptor
 
const TensorInfo const FillDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoweights
 
const TensorInfo const TensorInfo const TensorInfobiases
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const GatherDescriptordescriptor
 
const TensorInfo const TensorInfo const GatherDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const InstanceNormalizationDescriptordescriptor
 
const TensorInfo const InstanceNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const L2NormalizationDescriptordescriptor
 
const TensorInfo const L2NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptordescriptor
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const LogSoftmaxDescriptordescriptor
 
const TensorInfo const LogSoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfooutputStateIn
 
const TensorInfo const TensorInfocellStateIn
 
const TensorInfo const TensorInfo const TensorInfoscratchBuffer
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const MeanDescriptordescriptor
 
const TensorInfo const MeanDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo & output
 
const TensorInfo const NormalizationDescriptordescriptor
 
const TensorInfo const NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PadDescriptordescriptor
 
const TensorInfo const PadDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PermuteDescriptordescriptor
 
const TensorInfo const PermuteDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling2dDescriptordescriptor
 
const TensorInfo const Pooling2dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling3dDescriptordescriptor
 
const TensorInfo const Pooling3dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const PreCompiledDescriptordescriptor
 
const PreCompiledDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoalpha
 
const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReduceDescriptordescriptor
 
const TensorInfo const ReduceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReshapeDescriptordescriptor
 
const TensorInfo const ReshapeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ResizeDescriptordescriptor
 
const TensorInfo const ResizeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SliceDescriptordescriptor
 
const TensorInfo const SliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SoftmaxDescriptordescriptor
 
const TensorInfo const SoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToBatchNdDescriptordescriptor
 
const TensorInfo const SpaceToBatchNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToDepthDescriptordescriptor
 
const TensorInfo const SpaceToDepthDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptordescriptor
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStandInSupported(const std const TensorInfooutput
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptordescriptor
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "22.11") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfooutput0
 
const TensorInfo const TensorInfo const TensorInfooutput1
 
const TensorInfo const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeConvolution2dDescriptordescriptor
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeDescriptordescriptor
 
const TensorInfo const TransposeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo & input,
const TensorInfo & output,
const ActivationDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 506 of file RefLayerSupport.cpp.

References armnn::Abs, armnn::BFloat16, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

Referenced by RefLayerSupport::IsLayerSupported().

510 {
511  bool supported = true;
512 
513  // Define supported types.
514  std::array<DataType,6> supportedTypes = {
521  };
522 
523  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
524  "Reference activation: input type not supported.");
525 
526  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
527  "Reference activation: output type not supported.");
528 
529  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
530  "Reference activation: input and output types mismatched.");
531 
532  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
533  "Reference activation: input and output shapes are of different rank.");
534 
535 
536  struct ActivationFunctionSupported : public Rule
537  {
538  ActivationFunctionSupported(const ActivationDescriptor& desc)
539  {
540  switch(desc.m_Function)
541  {
554  {
555  m_Res = true;
556  break;
557  }
558  default:
559  {
560  m_Res = false;
561  break;
562  }
563  }
564  }
565  };
566 
567  // Function is supported
568  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
569  "Reference activation: function not supported.");
570 
571  return supported;
572 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
min(a, max(b, input)) ReLu1 & ReLu6.
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 574 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported(), and TEST_SUITE().

578 {
579  bool supported = true;
580 
581  std::array<DataType,7> supportedTypes = {
589  };
590 
591  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
592  "Reference addition: input 0 is not a supported type.");
593 
594  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
595  "Reference addition: input 1 is not a supported type.");
596 
597  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
598  "Reference addition: output is not a supported type.");
599 
600  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
601  "Reference addition: input 0 and Input 1 types are mismatched");
602 
603  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
604  "Reference addition: input and output types are mismatched");
605 
606  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
607  "Reference addition: shapes are not suitable for implicit broadcast.");
608 
609  return supported;
610 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo & input,
const TensorInfo & output,
const ArgMinMaxDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 612 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

615 {
617 
618  std::array<DataType, 8> supportedInputTypes =
619  {
628  };
629 
630  std::array<DataType,2> supportedOutputTypes = {
632  DataType::Signed64
633  };
634 
635  bool supported = true;
636 
637  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
638  "Reference ArgMinMax: input is not a supported type.");
639  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
640  "Reference ArgMinMax: output type not supported");
641 
642  return supported;
643 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & mean,
const TensorInfo & var,
const TensorInfo & beta,
const TensorInfo & gamma,
const BatchNormalizationDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 645 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

653 {
655 
656  std::array<DataType, 6> supportedTypes =
657  {
664  };
665 
666  bool supported = true;
667 
668  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
669  "Reference batch normalization: input is not a supported type.");
670 
671  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
672  "Reference batch normalization: output is not a supported type.");
673 
674  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
675  "Reference batch normalization: input and output types are mismatched");
676 
677  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
678  "Reference batch normalization: mean is not a supported type.");
679 
680  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
681  "Reference batch normalization: variance is not a supported type.");
682 
683  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
684  "Reference batch normalization: beta is not a supported type.");
685 
686  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
687  "Reference batch normalization: gamma is not a supported type.");
688 
689  return supported;
690 }
const TensorInfo & output
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const TensorInfo & mean

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo input,
const TensorInfo output,
const BatchToSpaceNdDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 692 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

696 {
698 
699  bool supported = true;
700 
701  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
702  std::string inputTensorStr = "input";
703  std::string outputTensorStr = "output";
704 
705  // Define supported types.
706  std::array<DataType,6> supportedTypes =
707  {
714  };
715 
716  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
717  "Reference BatchToSpaceNd: input type not supported.");
718 
719  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
720  "Reference BatchToSpaceNd: output type not supported.");
721 
722  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
723  "Reference BatchToSpaceNd: input and output types mismatched.");
724 
725  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
727  CreateIncorrectDimensionsErrorMsg(4,
729  batchToSpaceNdLayerStr,
730  outputTensorStr).data());
731 
732  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
734  CreateIncorrectDimensionsErrorMsg(4,
735  input.GetNumDimensions(),
736  batchToSpaceNdLayerStr,
737  inputTensorStr).data());
738 
739  return supported;
740 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 742 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

745 {
746  std::array<DataType, 9> supportedInputTypes =
747  {
756  };
757 
758  bool supported = true;
759  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
760  "Reference cast: input is not a supported type");
761 
762 
763  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
764  "Reference cast: output is not a supported type");
765 
766  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
767  "Reference cast: input and output shapes have different number of total elements");
768 
769  return supported;
770 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo input,
const TensorInfo output,
const ChannelShuffleDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 772 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

776 {
778  bool supported = true;
779 
780  // Define supported output and inputs types.
781  std::array<DataType, 7> supportedTypes =
782  {
790  };
791 
792  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
793  "Reference ChannelShuffle: input is not a supported type.");
794 
795  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
796  "Reference ChannelShuffle: output is not a supported type.");
797 
798  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
799  "Reference ChannelShuffle: input and output types are mismatched.");
800 
801  return supported;
802 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const ComparisonDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 805 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

810 {
812  std::array<DataType, 8> supportedInputTypes =
813  {
822  };
823 
824  bool supported = true;
825  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
826  "Reference comparison: input 0 is not a supported type");
827 
828  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
829  "Reference comparison: input 0 and Input 1 types are mismatched");
830 
832  "Reference comparison: output is not of type Boolean");
833 
834  return supported;
835 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo *>  inputs,
const TensorInfo output,
const OriginsDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 837 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

841 {
843 
844  bool supported = true;
845  std::array<DataType,7> supportedTypes =
846  {
854  };
855 
856  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
857  "Reference concatenation: output type not supported");
858  for (const TensorInfo* input : inputs)
859  {
860  ARMNN_ASSERT(input != nullptr);
861  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
862  "Reference concatenation: input type not supported");
863 
864  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
865  "Reference concatenation: input and output types mismatched.");
866  }
867 
868  return supported;
869 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 871 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

873 {
874  std::array<DataType,8> supportedTypes =
875  {
884  };
885 
886  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
887  "Reference constant: output is not a supported type.");
888 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertBf16ToFp32Supported()

bool IsConvertBf16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 890 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

Referenced by RefLayerSupport::IsLayerSupported().

893 {
894  bool supported = true;
895 
896  supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported,
897  "Reference for ConvertBf16ToFp32 layer: input type not supported");
898 
900  "Reference for ConvertBf16ToFp32 layer: output type not supported");
901 
902  return supported;
903 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 905 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

908 {
910  input.GetDataType(),
911  &TrueFunc<>,
912  &FalseInputFuncF32<>,
913  &FalseFuncU8<>,
914  &FalseFuncI32<>,
915  &FalseFuncU8<>) &&
918  &FalseOutputFuncF16<>,
919  &TrueFunc<>,
920  &FalseFuncU8<>,
921  &FalseFuncI32<>,
922  &FalseFuncU8<>));
923 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
DataType GetDataType() const
Definition: Tensor.hpp:198
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsConvertFp32ToBf16Supported()

bool IsConvertFp32ToBf16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 925 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), and armnn::Float32.

Referenced by RefLayerSupport::IsLayerSupported().

928 {
929  bool supported = true;
930 
931  supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported,
932  "Reference for ConvertFp32ToBf16 layer: input type not supported");
933 
935  "Reference for ConvertFp32ToBf16 layer: output type not supported");
936 
937  return supported;
938 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 940 of file RefLayerSupport.cpp.

References TensorInfo::GetDataType(), and armnn::IsSupportedForDataTypeGeneric().

Referenced by RefLayerSupport::IsLayerSupported().

943 {
945  input.GetDataType(),
946  &FalseInputFuncF16<>,
947  &TrueFunc<>,
948  &FalseFuncU8<>,
949  &FalseFuncI32<>,
950  &FalseFuncU8<>) &&
953  &TrueFunc<>,
954  &FalseOutputFuncF32<>,
955  &FalseFuncU8<>,
956  &FalseFuncI32<>,
957  &FalseFuncU8<>));
958 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
DataType GetDataType() const
Definition: Tensor.hpp:198
bool IsSupportedForDataTypeGeneric(Optional< std::string &> reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 960 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

966 {
967  bool supported = true;
968 
969  // Define supported types.
970  std::array<DataType,7> supportedTypes =
971  {
979  };
980 
981  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
982  "Reference Convolution2d: input is not a supported type.");
983 
984  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
985  "Reference Convolution2d: output is not a supported type.");
986 
987  // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization.
988  if (input.GetDataType() == DataType::BFloat16)
989  {
991  {
992  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
993  supported = false;
994  }
995  }
996  else
997  {
998  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
999  "Reference Convolution2d: input and output types mismatched.");
1000  }
1001 
1002  const DataType inputType = input.GetDataType();
1003  if (IsQuantized8BitType(inputType))
1004  {
1005  std::array<DataType, 3> supportedWeightTypes =
1006  {
1010  };
1011 
1012  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1013  "Reference Convolution2d: weights type not supported for quantized input.");
1014  }
1015  else
1016  {
1017  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1018  "Reference Convolution2d: weights is not a supported type.");
1019 
1020  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1021  "Reference Convolution2d: input and weights types mismatched.");
1022  }
1023 
1024  if (biases.has_value())
1025  {
1026  std::array<DataType,4> biasesSupportedTypes =
1027  {
1032  };
1033 
1034  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1035  "Reference Convolution2d: biases is not a supported type.");
1036  }
1038 
1039  return supported;
1040 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo input,
const TensorInfo output,
const Convolution3dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1042 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1048 {
1049  bool supported = true;
1050 
1051  // Define supported types.
1052  std::array<DataType,7> supportedTypes =
1053  {
1061  };
1062 
1063  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1064  "Reference Convolution3d: input is not a supported type.");
1065 
1066  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1067  "Reference Convolution3d: output is not a supported type.");
1068 
1069  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1070  "Reference Convolution3d: input and output types mismatched.");
1071 
1072  const DataType inputType = input.GetDataType();
1073  if (IsQuantized8BitType(inputType))
1074  {
1075  std::array<DataType, 3> supportedWeightTypes =
1076  {
1080  };
1081 
1082  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1083  "Reference Convolution3d: weights type not supported for quantized input.");
1084  }
1085  else
1086  {
1087  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1088  "Reference Convolution3d: weights is not a supported type.");
1089 
1090  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1091  "Reference Convolution3d: input and weights types mismatched.");
1092  }
1093 
1094  if (biases.has_value())
1095  {
1096  std::array<DataType,4> biasesSupportedTypes =
1097  {
1102  };
1103 
1104  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1105  "Reference Convolution3d: biases is not a supported type.");
1106  }
1108 
1109  return supported;
1110 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1112 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1115 {
1116  bool supported = true;
1117 
1118  std::array<DataType, 8> supportedTypes =
1119  {
1128  };
1129 
1130  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1131  "Reference for Debug layer: input type not supported");
1132 
1133  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1134  "Reference for Debug layer: output type not supported");
1135 
1136  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1137  "Reference for Debug layer: input and output types are mismatched");
1138 
1139  return supported;
1140 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo input,
const TensorInfo output,
const DepthToSpaceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1142 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1146 {
1148  bool supported = true;
1149 
1150  std::array<DataType,6> supportedTypes =
1151  {
1158  };
1159 
1160  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1161  "Reference DepthToSpace: input type not supported");
1162 
1163  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1164  "Reference DepthToSpace: output type not supported");
1165 
1166  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1167  "Reference DepthToSpace: input and output types are mismatched");
1168 
1169  return supported;
1170 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1172 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().

1178 {
1180  bool supported = true;
1181 
1182  // Define supported types.
1183  std::array<DataType,7> supportedTypes =
1184  {
1192  };
1193 
1194  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1195  "Reference DepthwiseConvolution2d: input is not a supported type.");
1196 
1197  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1198  "Reference DepthwiseConvolution2d: output is not a supported type.");
1199 
1200  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1201  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1202 
1203  const DataType inputType = input.GetDataType();
1204  if (IsQuantized8BitType(inputType))
1205  {
1206  std::array<DataType, 3> supportedWeightTypes =
1207  {
1211  };
1212 
1213  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1214  "Reference DepthwiseConvolution2d: weights type not supported for "
1215  "quantized input.");
1216  }
1217  else
1218  {
1219  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1220  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1221 
1222  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1223  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1224  }
1225 
1226  if (biases.has_value())
1227  {
1228  std::array<DataType,4> biasesSupportedTypes =
1229  {
1234  };
1235  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1236  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1237  }
1238 
1239  return supported;
1240 
1241 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1243 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

1246 {
1247  bool supported = true;
1248 
1249  std::array<DataType,5> supportedInputTypes = {
1255  };
1256 
1257  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1258  "Reference for Dequantize layer: input type not supported.");
1259 
1260  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1261  "Reference for Dequantize layer: per-axis quantized input not supported.");
1262 
1263  std::array<DataType,3> supportedOutputTypes = {
1266  DataType::Float16
1267  };
1268 
1269  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1270  "Reference for Dequantize layer: output type not supported.");
1271 
1272  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1273  "Reference for Dequantize layer: input/output shapes have different num total "
1274  "elements.");
1275 
1276  return supported;
1277 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo boxEncodings,
const TensorInfo scores,
const TensorInfo anchors,
const TensorInfo detectionBoxes,
const TensorInfo detectionClasses,
const TensorInfo detectionScores,
const TensorInfo numDetections,
const DetectionPostProcessDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1279 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1288 {
1290 
1291  bool supported = true;
1292 
1293  std::array<DataType,6> supportedInputTypes =
1294  {
1301  };
1302 
1303  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1304  "Reference DetectionPostProcess: input 0 is not a supported type.");
1305 
1306  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1307  "Reference DetectionPostProcess: input 1 is not a supported type.");
1308 
1309  return supported;
1310 }
const TensorInfo const TensorInfo & anchors
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & scores
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo input,
const TensorInfo output,
const DepthwiseConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1312 of file RefLayerSupport.cpp.

References RefLayerSupport::IsDepthwiseConvolutionSupported().

1318 {
1320 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1322 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1326 {
1327  bool supported = true;
1328 
1329  std::array<DataType,7> supportedTypes = {
1337  };
1338 
1339  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1340  "Reference division: input 0 is not a supported type.");
1341 
1342  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1343  "Reference division: input 1 is not a supported type.");
1344 
1345  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1346  "Reference division: output is not a supported type.");
1347 
1348  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1349  "Reference division: input 0 and Input 1 types are mismatched");
1350 
1351  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1352  "Reference division: input and output types are mismatched");
1353 
1354  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1355  "Reference division: shapes are not suitable for implicit broadcast.");
1356 
1357  return supported;
1358 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo input,
const TensorInfo output,
const ElementwiseUnaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1360 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1364 {
1366 
1367  std::array<DataType, 7> supportedTypes =
1368  {
1376  };
1377 
1378  std::array<DataType, 1> logicalSupportedTypes =
1379  {
1381  };
1382 
1383  bool supported = true;
1384 
1385  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1386  {
1387  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1388  "Reference elementwise unary: input type not supported");
1389 
1390  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1391  "Reference elementwise unary: output type not supported");
1392  }
1393  else
1394  {
1395  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1396  "Reference elementwise unary: input type not supported");
1397 
1398  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1399  "Reference elementwise unary: output type not supported");
1400  }
1401 
1402  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1403  "Reference elementwise unary: input and output types not matching");
1404 
1405  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1406  "Reference elementwise unary: input and output shapes"
1407  "have different number of total elements");
1408 
1409  return supported;
1410 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo input,
const FakeQuantizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1412 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1415 {
1417  bool supported = true;
1418 
1419  std::array<DataType,1> supportedTypes =
1420  {
1422  };
1423 
1424  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1425  "Reference fake quantization: input type not supported.");
1426 
1427  return supported;
1428 }
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo input,
const TensorInfo output,
const FillDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1430 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1434 {
1437 
1438  bool supported = true;
1439 
1440  std::array<DataType,3> supportedTypes =
1441  {
1445  };
1446 
1447  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1448  "Reference Fill: input type not supported.");
1449 
1450  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1451  "Reference Fill: output type not supported.");
1452  return supported;
1453 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1455 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1458 {
1460  bool supported = true;
1461 
1462  std::array<DataType,3> supportedTypes =
1463  {
1467  };
1468 
1469  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1470  "Reference Floor: input type not supported.");
1471 
1472  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1473  "Reference Floor: output type not supported.");
1474 
1475  return supported;
1476 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo input,
const TensorInfo output,
const TensorInfo weights,
const TensorInfo biases,
const FullyConnectedDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1478 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1484 {
1485  bool supported = true;
1486 
1487  // Define supported types.
1488  std::array<DataType,6> supportedTypes =
1489  {
1496  };
1497 
1498  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1499  "Reference Fully Connected: input type not supported.");
1500 
1501  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1502  "Reference Fully Connected: output type not supported.");
1503 
1504  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1505  "Reference Fully Connected: weights type not supported.");
1506 
1507  // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization.
1508  if (input.GetDataType() == DataType::BFloat16)
1509  {
1511  {
1512  reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n";
1513  supported = false;
1514  }
1515  }
1516  else
1517  {
1518  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1519  "Reference Fully Connected: input and output types mismatched.");
1520  }
1521 
1522  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1523  "Reference Fully Connected: weights is not a supported type.");
1524 
1525  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1526  "Reference Fully Connected: input and weights types mismatched.");
1527 
1528  if (descriptor.m_BiasEnabled)
1529  {
1530  // Defined supported types for bias
1531  std::array<DataType, 5>
1532  supportedBiasTypes =
1533  {
1539  };
1540 
1541  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1542  "Reference Fully Connected: bias type not supported.");
1543 
1544  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1545  "Reference Fully Connected: bias and weight types mismatch.");
1546 
1547  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1548  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1549 
1550  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1551  "Reference Fully Connected: bias must have 1 dimension.");
1552 
1553  }
1554 
1555  return supported;
1556 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
DataType GetDataType() const
Definition: Tensor.hpp:198
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1558 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1562 {
1563  bool supported = true;
1564  std::array<DataType,7> supportedTypes =
1565  {
1573  };
1574 
1575  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1576  "Reference GatherNd: input type not supported");
1577 
1578  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1579  "Reference GatherNd: output type not supported");
1580 
1581  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1582  "Reference GatherNd: indices (input1) type not supported");
1583 
1584  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1585  "Reference GatherNd: input and output types not matching");
1586 
1587  return supported;
1588 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1590 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, GatherDescriptor::m_Axis, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

1595 {
1596  bool supported = true;
1597  std::array<DataType,7> supportedTypes =
1598  {
1606  };
1607 
1608  if (descriptor.m_Axis != 0)
1609  {
1610  reasonIfUnsupported.value() += std::string("Reference Gather: axis not supported\n");
1611  supported &= false;
1612  }
1613  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1614  "Reference Gather: input type not supported");
1615 
1616  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1617  "Reference Gather: output type not supported");
1618 
1619  supported &= CheckSupportRule(TypeIs(input1, DataType::Signed32), reasonIfUnsupported,
1620  "Reference Gather: indices (input1) type not supported");
1621 
1622  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1623  "Reference Gather: input and output types not matching");
1624 
1625  return supported;
1626 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1628 of file RefLayerSupport.cpp.

Referenced by RefLayerSupport::IsLayerSupported().

1630 {
1631  return true;
1632 }

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1634 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1638 {
1640  // Define supported types
1641  std::array<DataType, 3> supportedTypes =
1642  {
1646  };
1647 
1648  bool supported = true;
1649 
1650  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1651  "Reference Instance Normalization: input type not supported.");
1652 
1653  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1654  "Reference Instance Normalization: output type not supported.");
1655 
1656  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1657  "Reference Instance Normalization: input and output types mismatched.");
1658 
1659  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1660  "Reference Instance Normalization: input and output shapes have different "
1661  "num total elements.");
1662 
1663  return supported;
1664 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1666 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1670 {
1672  // Define supported types
1673  std::array<DataType, 6> supportedTypes =
1674  {
1681  };
1682 
1683  bool supported = true;
1684 
1685  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1686  "Reference L2normalization: input type not supported.");
1687 
1688  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1689  "Reference L2normalization: output type not supported.");
1690 
1691  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1692  "Reference L2normalization: input and output types mismatched.");
1693 
1694  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1695  "Reference L2normalization: input and output shapes have different "
1696  "num total elements.");
1697 
1698  return supported;
1699 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmInputParamsInfo,
Optional< std::string &>  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertBf16ToFp32, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToBf16, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, ILayerSupport::descriptor, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertBf16ToFp32Supported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToBf16Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherNdSupported(), RefLayerSupport::IsGatherSupported(), RefLayerSupport::IsInputSupported(), 
RefLayerSupport::IsInstanceNormalizationSupported(), RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, ILayerSupport::reasonIfUnsupported, 
armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Shape, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
83  return IsBatchNormalizationSupported(infos[0],
84  infos[1],
85  infos[2],
86  infos[3],
87  infos[4],
88  infos[5],
89  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
90  (&descriptor)),
93  return IsBatchToSpaceNdSupported(infos[0],
94  infos[1],
95  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
98  return IsComparisonSupported(infos[0],
99  infos[1],
100  infos[2],
101  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
103  case LayerType::Concat:
104  {
105  std::vector<const TensorInfo*> inputInfos;
106  for (uint32_t i = 0; i < (infos.size() - 1); i++)
107  {
108  inputInfos.push_back(&infos[i]);
109  }
110  return IsConcatSupported(inputInfos,
111  infos[infos.size() - 1],
112  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
114  }
115  case LayerType::Constant:
116  return IsConstantSupported(infos[0], reasonIfUnsupported);
118  return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
120  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
122  return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
124  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
126  {
127  if (infos.size() != 4)
128  {
129  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
130  "TensorInfos should be of format: {input, output, weights, biases}.");
131  }
132 
133  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
134  if (infos[3] == TensorInfo())
135  {
136  return IsConvolution2dSupported(infos[0],
137  infos[1],
138  desc,
139  infos[2],
140  EmptyOptional(),
142  }
143  else
144  {
145  return IsConvolution2dSupported(infos[0],
146  infos[1],
147  desc,
148  infos[2],
149  infos[3],
151  }
152  }
154  return IsDepthToSpaceSupported(infos[0],
155  infos[1],
156  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
159  {
160  if (infos.size() != 4)
161  {
162  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
163  "TensorInfos should be of format: {input, output, weights, biases}.");
164  }
165 
166  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
167  if (infos[3] == TensorInfo())
168  {
169  return IsDepthwiseConvolutionSupported(infos[0],
170  infos[1],
171  desc,
172  infos[2],
173  EmptyOptional(),
175  }
176  else
177  {
178  return IsDepthwiseConvolutionSupported(infos[0],
179  infos[1],
180  desc,
181  infos[2],
182  infos[3],
184  }
185  }
187  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
188  case LayerType::Division:
189  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
191  return IsElementwiseUnarySupported(infos[0],
192  infos[1],
193  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
195  case LayerType::Fill:
196  return IsFillSupported(infos[0],
197  infos[1],
198  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
200  case LayerType::Floor:
201  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
203  return IsFullyConnectedSupported(infos[0],
204  infos[1],
205  infos[2],
206  infos[3],
207  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
209  case LayerType::Gather:
210  return IsGatherSupported(infos[0],
211  infos[1],
212  infos[2],
213  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
215  case LayerType::GatherNd:
216  return IsGatherNdSupported(infos[0],
217  infos[1],
218  infos[2],
220  case LayerType::Input:
221  return IsInputSupported(infos[0], reasonIfUnsupported);
223  return IsInstanceNormalizationSupported(infos[0],
224  infos[1],
225  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
226  (&descriptor)),
229  return IsL2NormalizationSupported(infos[0],
230  infos[1],
231  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
234  return IsLogicalBinarySupported(infos[0],
235  infos[1],
236  infos[2],
237  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
240  return IsLogSoftmaxSupported(infos[0],
241  infos[1],
242  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
244  case LayerType::Lstm:
245  return IsLstmSupported(infos[0],
246  infos[1],
247  infos[2],
248  infos[3],
249  infos[4],
250  infos[5],
251  infos[6],
252  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
253  lstmParamsInfo.value(),
255  case LayerType::QLstm:
256  return IsQLstmSupported(infos[0],
257  infos[1],
258  infos[2],
259  infos[3],
260  infos[4],
261  infos[5],
262  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
263  lstmParamsInfo.value(),
265  case LayerType::Maximum:
266  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
267  case LayerType::Mean:
268  return IsMeanSupported(infos[0],
269  infos[1],
270  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
272  case LayerType::Minimum:
273  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
275  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
277  return IsNormalizationSupported(infos[0],
278  infos[1],
279  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
281  case LayerType::Output:
282  return IsOutputSupported(infos[0], reasonIfUnsupported);
283  case LayerType::Pad:
284  return IsPadSupported(infos[0],
285  infos[1],
286  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
288  case LayerType::Permute:
289  return IsPermuteSupported(infos[0],
290  infos[1],
291  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
294  return IsPooling2dSupported(infos[0],
295  infos[1],
296  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
298  case LayerType::Prelu:
299  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
300  case LayerType::Quantize:
301  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
302  case LayerType::Reshape:
303  return IsReshapeSupported(infos[0],
304  infos[1],
305  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
307  case LayerType::Resize:
308  return IsResizeSupported(infos[0],
309  infos[1],
310  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
312  case LayerType::Reduce:
313  return IsReduceSupported(infos[0],
314  infos[1],
315  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
317  case LayerType::Slice:
318  return IsSliceSupported(infos[0],
319  infos[1],
320  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
322  case LayerType::Softmax:
323  return IsSoftmaxSupported(infos[0],
324  infos[1],
325  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
328  return IsSpaceToBatchNdSupported(infos[0],
329  infos[1],
330  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
333  return IsSpaceToDepthSupported(infos[0],
334  infos[1],
335  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
337  case LayerType::Splitter:
338  {
339  std::vector<TensorInfo> outputInfos;
340  for (uint32_t i = 1; i < infos.size(); i++)
341  {
342  outputInfos.push_back(infos[i]);
343  }
344  return IsSplitterSupported(infos[0],
345  {outputInfos.begin(), outputInfos.end()},
346  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
348  }
349  case LayerType::Stack:
350  {
351  std::vector<const TensorInfo*> inputInfos;
352  for (uint32_t i = 0; i < infos.size() - 1; i++)
353  {
354  inputInfos.push_back(&infos[i]);
355  }
356  return IsStackSupported(inputInfos,
357  infos[infos.size() - 1],
358  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
360  }
362  return IsStridedSliceSupported(infos[0],
363  infos[1],
364  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
367  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
369  return IsTransposeSupported(infos[0],
370  infos[1],
371  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
374  {
375  if (infos.size() != 4)
376  {
377  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
378  "TensorInfos should be of format: {input, output, weights, biases}.");
379  }
380 
381  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
382  if (infos[3] == TensorInfo())
383  {
384  return IsTransposeConvolution2dSupported(infos[0],
385  infos[1],
386  desc,
387  infos[2],
388  EmptyOptional(),
390  }
391  else
392  {
393  return IsTransposeConvolution2dSupported(infos[0],
394  infos[1],
395  desc,
396  infos[2],
397  infos[3],
399  }
400  }
401  case LayerType::Cast:
402  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
404  return IsChannelShuffleSupported(infos[0],
405  infos[1],
406  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
409  {
410  if (infos.size() != 4)
411  {
412  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
413  "TensorInfos should be of format: {input, output, weights, biases}.");
414  }
415 
416  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
417  if (infos[3] == TensorInfo())
418  {
419  return IsConvolution3dSupported(infos[0],
420  infos[1],
421  desc,
422  infos[2],
423  EmptyOptional(),
425  }
426  else
427  {
428  return IsConvolution3dSupported(infos[0],
429  infos[1],
430  desc,
431  infos[2],
432  infos[3],
434  }
435  }
436  case LayerType::Debug:
437  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
439  return IsDetectionPostProcessSupported(infos[0],
440  infos[1],
441  infos[2],
442  infos[3],
443  infos[4],
444  infos[5],
445  infos[6],
446  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
447  (&descriptor)),
450  return IsFakeQuantizationSupported(infos[0],
451  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
453  case LayerType::MemCopy:
454  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
455  case LayerType::Rank:
456  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
457  case LayerType::Shape:
458  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
460  {
461  if (infos.size() != 6)
462  {
463  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
464  "should be of format: {input, outputStateIn, cellStateIn, "
465  "hiddenStateOutputVal, cellStateOutputVal, output}");
466  }
467  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
469  infos[1],
470  infos[2],
471  infos[3],
472  infos[4],
473  infos[5],
474  desc,
475  lstmParamsInfo.value(),
477  }
479  return IsPooling3dSupported(infos[0],
480  infos[1],
481  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
483  case LayerType::Map:
484  return true;
485  case LayerType::Unmap:
486  return true;
489  case LayerType::Merge:
490  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
493  infos[1],
494  infos[2],
495  infos[3],
496  infos[4],
497  quantizedLstmInputParamsInfo.value(),
499  default:
500  // layers not supported in neon by default:
501  // precompiled, standin, switch
502  return false;
503  }
504 }
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStackSupported(const std::vector< const TensorInfo *> &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertBf16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported) const override
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
const TensorInfo const ActivationDescriptor & descriptor
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConcatSupported(const std::vector< const TensorInfo *> inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsOutputSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConstantSupported(const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvertFp32ToBf16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInputSupported(const TensorInfo &input, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1701 of file RefLayerSupport.cpp.

References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1706 {
1708 
1709  std::array<DataType, 1> supportedTypes =
1710  {
1712  };
1713 
1714  bool supported = true;
1715  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1716  "Reference LogicalBinary: input 0 type not supported");
1717  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1718  "Reference LogicalBinary: input 1 type not supported");
1719 
1720  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1721  "Reference LogicalBinary: input and output types do not match");
1722 
1723  return supported;
1724 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported 
) const
override

Definition at line 1726 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

1730 {
1732 
1733  std::array<DataType, 3> supportedTypes =
1734  {
1738  };
1739 
1740  bool supported = true;
1741  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1742  "Reference LogSoftmax: input type not supported");
1743 
1744  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1745  "Reference LogSoftmax: output type not supported");
1746 
1747  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1748  "Reference LogSoftmax: input and output types do not match");
1749 
1750  return supported;
1751 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1753 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1763 {
1766 
1767  bool supported = true;
1768 
1769  std::array<DataType,3> supportedTypes = {
1773  };
1774 
1775  // check inputs and outputs
1776  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1777  "Reference Lstm: input is not a supported type.");
1778  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1779  "Reference Lstm: input and outputStateIn types are mismatched");
1780  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1781  "Reference Lstm: input and cellStateIn types are mismatched");
1782  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1783  "Reference Lstm: input and scratchBuffer types are mismatched");
1784  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1785  "Reference Lstm: input and outputStateOut types are mismatched");
1786  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1787  "Reference Lstm: input and cellStateOut types are mismatched");
1788 
1789  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1790  "Reference Lstm: input and output types are mismatched");
1791  // check layer parameters
1792  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1793  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1794  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1795  "Reference Lstm: input and InputToCellWeights types are mismatched");
1796  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1797  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1798  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1799  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1800  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1801  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1802  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1803  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1804  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1805  "Reference Lstm: input and ForgetGateBias types are mismatched");
1806  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1807  "Reference Lstm: input and CellBias types are mismatched");
1808  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1809  "Reference Lstm: input and OutputGateBias types are mismatched");
1810  if (!descriptor.m_CifgEnabled)
1811  {
1812  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1813  "Reference Lstm: input and InputToInputWeights types are mismatched");
1814  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1816  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1817  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1818  "Reference Lstm: input and InputGateBias types are mismatched");
1819  if (descriptor.m_PeepholeEnabled)
1820  {
1821  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1823  "Reference Lstm: input and CellToInputWeights types are mismatched");
1824  }
1825  }
1826  if (descriptor.m_PeepholeEnabled)
1827  {
1828  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1829  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1830  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1831  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1832  }
1833  if (descriptor.m_ProjectionEnabled)
1834  {
1835  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1836  "Reference Lstm: input and mProjectionWeights types are mismatched");
1837  if (paramsInfo.m_ProjectionBias != nullptr)
1838  {
1839  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1840  "Reference Lstm: input and ProjectionBias types are mismatched");
1841  }
1842  }
1843  if (descriptor.m_LayerNormEnabled)
1844  {
1845  if (!descriptor.m_CifgEnabled)
1846  {
1847  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1849  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1850  }
1851  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1853  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1854  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1856  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1857  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1859  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1860  }
1861 
1862  return supported;
1863 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1865 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1869 {
1870  bool supported = true;
1871 
1872  std::array<DataType,7> supportedTypes = {
1880  };
1881 
1882  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1883  "Reference maximum: input 0 is not a supported type.");
1884 
1885  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1886  "Reference maximum: input 1 is not a supported type.");
1887 
1888  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1889  "Reference maximum: output is not a supported type.");
1890 
1891  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1892  "Reference maximum: input 0 and Input 1 types are mismatched");
1893 
1894  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1895  "Reference maximum: input and output types are mismatched");
1896 
1897  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1898  "Reference maximum: shapes are not suitable for implicit broadcast.");
1899 
1900  return supported;
1901 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo input,
const TensorInfo output,
const MeanDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1903 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::numeric_cast(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1907 {
1908  bool supported = true;
1909  std::string meanLayerStr = "Mean";
1910  std::string outputTensorStr = "output";
1911 
1912  std::array<DataType,6> supportedTypes =
1913  {
1920  };
1921 
1922  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1923  "Reference Mean: input type not supported.");
1924 
1925  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1926  "Reference Mean: input and output types are mismatched");
1927 
1928  if (descriptor.m_KeepDims)
1929  {
1930  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1932  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1934  meanLayerStr, outputTensorStr).data());
1935  }
1936  else if (descriptor.m_Axis.empty())
1937  {
1938  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1940  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1941  meanLayerStr, outputTensorStr).data());
1942  }
1943  else
1944  {
1945  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1946 
1947  if (outputDim > 0)
1948  {
1949  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1951  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1952  meanLayerStr, outputTensorStr).data());
1953  }
1954  else
1955  {
1956  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1958  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1959  meanLayerStr, outputTensorStr).data());
1960  }
1961  }
1962 
1963  return supported;
1964 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo const ActivationDescriptor & descriptor
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1966 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

1969 {
1970  bool supported = true;
1971 
1972  std::array<DataType,7> supportedTypes =
1973  {
1981  };
1982 
1983  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1984  "Reference MemCopy: input type not supported");
1985 
1986  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1987  "Reference MemCopy: output type not supported");
1988 
1989  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1990  "Reference MemCopy: input and output types are mismatched");
1991 
1992  return supported;
1993 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1995 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

1999 {
2000  bool supported = true;
2001 
2002  std::array<DataType,7> supportedTypes = {
2010  };
2011 
2012  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2013  "Reference minimum: input 0 is not a supported type.");
2014 
2015  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2016  "Reference minimum: input 1 is not a supported type.");
2017 
2018  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2019  "Reference minimum: output is not a supported type.");
2020 
2021  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2022  "Reference minimum: input 0 and Input 1 types are mismatched");
2023 
2024  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2025  "Reference minimum: input and output types are mismatched");
2026 
2027  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2028  "Reference minimum: shapes are not suitable for implicit broadcast.");
2029 
2030  return supported;
2031 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2033 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2037 {
2038  bool supported = true;
2039 
2040  std::array<DataType,7> supportedTypes = {
2048  };
2049 
2050  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2051  "Reference multiplication: input 0 is not a supported type.");
2052 
2053  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2054  "Reference multiplication: input 1 is not a supported type.");
2055 
2056  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2057  "Reference multiplication: output is not a supported type.");
2058 
2059  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2060  "Reference multiplication: input 0 and Input 1 types are mismatched");
2061 
2062  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2063  "Reference multiplication: input and output types are mismatched");
2064 
2065  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2066  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2067 
2068  return supported;
2069 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const NormalizationDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2071 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2075 {
2077 
2078  // Define supported types
2079  std::array<DataType, 6> supportedTypes =
2080  {
2087  };
2088 
2089  bool supported = true;
2090 
2091  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2092  "Reference normalization: input type not supported.");
2093 
2094  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2095  "Reference normalization: output type not supported.");
2096 
2097  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2098  "Reference normalization: input and output shapes have different "
2099  "num total elements.");
2100 
2101  return supported;
2102 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2104 of file RefLayerSupport.cpp.

Referenced by RefLayerSupport::IsLayerSupported().

2106 {
2107  return true;
2108 }

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo input,
const TensorInfo output,
const PadDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2110 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2114 {
2116  bool supported = true;
2117 
2118  // Define supported output and inputs types.
2119  std::array<DataType,6> supportedTypes =
2120  {
2127  };
2128 
2129  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2130  "Reference pad: input is not a supported type.");
2131 
2132  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2133  "Reference pad: output is not a supported type.");
2134 
2135  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2136  "Reference pad: input and output types are mismatched.");
2137 
2138  return supported;
2139 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo input,
const TensorInfo output,
const PermuteDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2141 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2145 {
2147  bool supported = true;
2148 
2149  // Define supported output and inputs types.
2150  std::array<DataType, 6> supportedTypes =
2151  {
2158  };
2159 
2160  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2161  "Reference permute: input is not a supported type.");
2162 
2163  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2164  "Reference permute: output is not a supported type.");
2165 
2166  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2167  "Reference permute: input and output types are mismatched.");
2168 
2169  return supported;
2170 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling2dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2172 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2176 {
2178  bool supported = true;
2179 
2180  // Define supported output and inputs types.
2181  std::array<DataType,6> supportedTypes =
2182  {
2189  };
2190 
2191  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2192  "Reference pooling2d: input is not a supported type.");
2193 
2194  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2195  "Reference pooling2d: output is not a supported type.");
2196 
2197  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2198  "Reference pooling2d: input and output types are mismatched.");
2199 
2200  return supported;
2201 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo input,
const TensorInfo output,
const Pooling3dDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2203 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2207 {
2209  bool supported = true;
2210 
2211  // Define supported output and inputs types.
2212  std::array<DataType,6> supportedTypes =
2213  {
2220  };
2221 
2222  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2223  "Reference pooling3d: input is not a supported type.");
2224 
2225  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2226  "Reference pooling3d: output is not a supported type.");
2227 
2228  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2229  "Reference pooling3d: input and output types are mismatched.");
2230 
2231  return supported;
2232 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo input,
const TensorInfo alpha,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2662 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2666 {
2667  bool supported = true;
2668 
2669  std::array<DataType, 6> supportedTypes
2670  {
2677  };
2678 
2679  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2680  "PReLU: input is not a supported type.");
2681 
2682  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2683  "PReLU: alpha is not a supported type.");
2684 
2685  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2686  "PReLU: output is not a supported type.");
2687 
2688  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2689  "PReLU: input, alpha and output types are mismatched");
2690 
2691  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2692  "PReLU: shapes are not suitable for implicit broadcast");
2693 
2694  return supported;
2695 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & alpha
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo input,
const TensorInfo previousOutputIn,
const TensorInfo previousCellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const QLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2235 of file RefLayerSupport.cpp.

References armnn::IgnoreUnused().

Referenced by RefLayerSupport::IsLayerSupported().

2244 {
2245  IgnoreUnused(input);
2253 
2255 
2256  return true;
2257 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo const TensorInfo & previousCellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
const TensorInfo & previousOutputIn

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2259 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

2262 {
2263  bool supported = true;
2264 
2265  // Define supported input types.
2266  std::array<DataType,7> supportedInputTypes = {
2274  };
2275 
2276  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2277  "Reference quantize: input type not supported.");
2278 
2279  // Define supported output types.
2280  std::array<DataType,4> supportedOutputTypes = {
2284  DataType::QSymmS16
2285  };
2286  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2287  "Reference quantize: output type not supported.");
2288 
2289  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2290  "Reference quantize: input and output shapes have different num total elements.");
2291 
2292  return supported;
2293 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2295 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2298 {
2299  IgnoreUnused(input);
2300  // Define supported output types.
2301  std::array<DataType,1> supportedOutputTypes =
2302  {
2304  };
2305 
2306  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2307  "Reference rank: input type not supported.");
2308 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo input,
const TensorInfo output,
const ReduceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2310 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2314 {
2316  bool supported = true;
2317  std::array<DataType,7> supportedTypes =
2318  {
2326  };
2327 
2328  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2329  "Reference Reduce: input type not supported");
2330 
2331  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2332  "Reference Reduce: output type not supported");
2333 
2334  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2335  "Reference Reduce: input and output types not matching");
2336 
2337  return supported;
2338 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo input,
const TensorInfo output,
const ReshapeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2340 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2344 {
2347  // Define supported output types.
2348  std::array<DataType,8> supportedOutputTypes =
2349  {
2358  };
2359 
2360  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2361  "Reference reshape: input type not supported.");
2362 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo input,
const TensorInfo output,
const ResizeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2364 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2368 {
2370  bool supported = true;
2371  std::array<DataType,6> supportedTypes =
2372  {
2379  };
2380 
2381  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2382  "Reference Resize: input type not supported");
2383 
2384  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2385  "Reference Resize: output type not supported");
2386 
2387  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2388  "Reference Resize: input and output types not matching");
2389 
2390  return supported;
2391 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo input,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2393 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2396 {
2397  IgnoreUnused(input);
2398  bool supported = true;
2399 
2400  std::array<DataType, 1> supportedTypes =
2401  {
2403  };
2404 
2405  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2406  "Reference Shape: output type not supported");
2407 
2408  return supported;
2409 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo input,
const TensorInfo output,
const SliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2411 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2415 {
2417  bool supported = true;
2418 
2419  std::array<DataType, 5> supportedTypes =
2420  {
2426  };
2427 
2428  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2429  "Reference Slice: input type not supported");
2430 
2431  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2432  "Reference Slice: output type not supported");
2433 
2434  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2435  "Reference Slice: input and output types are mismatched");
2436 
2437  return supported;
2438 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const SoftmaxDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2440 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

Referenced by RefLayerSupport::IsLayerSupported().

2444 {
2446  bool supported = true;
2447  std::array<DataType,7> supportedTypes =
2448  {
2456  };
2457 
2458  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2459  "Reference Softmax: input type not supported");
2460 
2461  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2462  "Reference Softmax: output type not supported");
2463 
2464  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2465  "Reference Softmax: input and output types are mismatched");
2466 
2467  return supported;
2468 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToBatchNdDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2470 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2474 {
2476  bool supported = true;
2477  std::array<DataType,6> supportedTypes =
2478  {
2485  };
2486 
2487  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2488  "Reference SpaceToBatchNd: input type not supported");
2489 
2490  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2491  "Reference SpaceToBatchNd: output type not supported");
2492 
2493  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2494  "Reference SpaceToBatchNd: input and output types are mismatched");
2495 
2496  return supported;
2497 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo input,
const TensorInfo output,
const SpaceToDepthDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2499 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2503 {
2504 
2506  bool supported = true;
2507 
2508  std::array<DataType,6> supportedTypes =
2509  {
2516  };
2517 
2518  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2519  "Reference SpaceToDepth: input type not supported");
2520 
2521  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2522  "Reference SpaceToDepth: output type not supported");
2523 
2524  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2525  "Reference SpaceToDepth: input and output types are mismatched");
2526 
2527  return supported;
2528 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2530 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputs, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2534 {
2536  bool supported = true;
2537  std::array<DataType,6> supportedTypes =
2538  {
2545  };
2546 
2547  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2548  "Reference splitter: output type not supported");
2549  for (const TensorInfo& output : outputs)
2550  {
2551  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2552  "Reference splitter: input type not supported");
2553 
2554  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2555  "Reference splitter: input and output types mismatched.");
2556  }
2557 
2558  return supported;
2559 }
const TensorInfo & output
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo *> &  inputs,
const TensorInfo output,
const StackDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2561 of file RefLayerSupport.cpp.

References ARMNN_ASSERT, armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2565 {
2567 
2568  bool supported = true;
2569  std::array<DataType,7> supportedTypes =
2570  {
2578  };
2579 
2580  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2581  "Reference stack: output type not supported");
2582  for (const TensorInfo* input : inputs)
2583  {
2584  ARMNN_ASSERT(input != nullptr);
2585  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2586  "Reference stack: input type not supported");
2587 
2588  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2589  "Reference stack: input and output types mismatched.");
2590  }
2591 
2592  return supported;
2593 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo input,
const TensorInfo output,
const StridedSliceDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2595 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2599 {
2601  bool supported = true;
2602 
2603  std::array<DataType,5> supportedTypes =
2604  {
2610  };
2611 
2612  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2613  "Reference StridedSlice: input type not supported");
2614 
2615  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2616  "Reference StridedSlice: output type not supported");
2617 
2618  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2619  "Reference StridedSlice: input and output types are mismatched");
2620 
2621  return supported;
2622 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2624 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2628 {
2629  bool supported = true;
2630 
2631  std::array<DataType,7> supportedTypes = {
2639  };
2640 
2641  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2642  "Reference subtraction: input 0 is not a supported type.");
2643 
2644  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2645  "Reference subtraction: input 1 is not a supported type.");
2646 
2647  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2648  "Reference subtraction: output is not a supported type.");
2649 
2650  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2651  "Reference subtraction: input 0 and Input 1 types are mismatched");
2652 
2653  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2654  "Reference subtraction: input and output types are mismatched");
2655 
2656  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2657  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2658 
2659  return supported;
2660 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
const TensorInfo & input1
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeConvolution2dDescriptor descriptor,
const TensorInfo weights,
const Optional< TensorInfo > &  biases,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2697 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

Referenced by RefLayerSupport::IsLayerSupported().

2703 {
2704  IgnoreUnused(descriptor);
2705  bool supported = true;
2706 
2707  std::array&lt;DataType,7&gt; supportedTypes =
2708  {
2709  DataType::BFloat16,
2710  DataType::Float32,
2711  DataType::Float16,
2712  DataType::QAsymmS8,
2713  DataType::QAsymmU8,
2714  DataType::QSymmS16,
2715  DataType::Signed32
2716  };
2717 
2718  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2719  "Reference TransposeConvolution2d: input is not a supported type.");
2720 
2721  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2722  "Reference TransposeConvolution2d: output is not a supported type.");
2723 
2724  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2725  "Reference TransposeConvolution2d: input and output types mismatched.");
2726 
2727 
2728  const DataType inputType = input.GetDataType();
2729  if (IsQuantized8BitType(inputType))
2730  {
2731  std::array&lt;DataType, 3&gt; supportedWeightTypes =
2732  {
2733  DataType::QAsymmS8,
2734  DataType::QAsymmU8,
2735  DataType::QSymmS8
2736  };
2737 
2738  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2739  "Reference TransposeConvolution2d: weights type not supported for "
2740  "quantized input.");
2741  }
2742  else
2743  {
2744  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2745  "Reference TransposeConvolution2d: weights is not a supported type.");
2746 
2747  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2748  "Reference TransposeConvolution2d: input and weights types mismatched.");
2749  }
2750 
2751  if (biases.has_value())
2752  {
2753  std::array&lt;DataType,4&gt; biasesSupportedTypes =
2754  {
2755  DataType::BFloat16,
2756  DataType::Float32,
2757  DataType::Float16,
2758  DataType::Signed32
2759  };
2760  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2761  "Reference TransposeConvolution2d: biases is not a supported type.");
2762  }
2763 
2764  return supported;
2765 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:285
DataType
Definition: Types.hpp:48
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeDescriptor descriptor,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2767 of file RefLayerSupport.cpp.

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.

Referenced by RefLayerSupport::IsLayerSupported().

2771 {
2772  IgnoreUnused(descriptor);
2773  bool supported = true;
2774 
2775  // Define supported output and inputs types.
2776  std::array&lt;DataType, 6&gt; supportedTypes =
2777  {
2778  DataType::BFloat16,
2779  DataType::Float32,
2780  DataType::Float16,
2781  DataType::QAsymmS8,
2782  DataType::QAsymmU8,
2783  DataType::QSymmS16
2784  };
2785 
2786  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2787  "Reference transpose: input is not a supported type.");
2788 
2789  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2790  "Reference transpose: output is not a supported type.");
2791 
2792  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2793  "Reference transpose: input and output types are mismatched.");
2794 
2795  return supported;
2796 }
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const UnidirectionalSequenceLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string &>  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2798 of file RefLayerSupport.cpp.

References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, armnn::QAsymmS8, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

2808 {
2809  IgnoreUnused(descriptor);
2810  IgnoreUnused(paramsInfo);
2811  IgnoreUnused(outputStateIn);
2812  IgnoreUnused(cellStateIn);
2813  IgnoreUnused(outputStateOut);
2814  IgnoreUnused(cellStateOut);
2815  bool supported = true;
2816 
2817  std::array&lt;DataType, 2&gt; supportedTypes =
2818  {
2819  DataType::Float32,
2820  DataType::QAsymmS8
2821  };
2822 
2823  std::array&lt;DataType, 2&gt; supportedWeightTypes =
2824  {
2825  DataType::Float32,
2826  DataType::QAsymmS8
2827  };
2828 
2829  std::array&lt;DataType, 3&gt; supportedBiasTypes =
2830  {
2831  DataType::Float32,
2832  DataType::QAsymmS8,
2833  DataType::Signed32
2834  };
2835 
2836  // check inputs and outputs
2837  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2838  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2839  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2840  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2841 
2842  // check layer parameters
2843  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2844  reasonIfUnsupported,
2845  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2846  "is not a supported type.");
2847  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2848  reasonIfUnsupported,
2849  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2850  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2851  reasonIfUnsupported,
2852  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2853  "is not a supported type.");
2854  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2855  reasonIfUnsupported,
2856  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2857  "is not a supported type.");
2858  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2859  reasonIfUnsupported,
2860  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2861  "is not a supported type.");
2862  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2863  reasonIfUnsupported,
2864  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2865  "is not a supported type.");
2866 
2867  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2868  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2869  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2870  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2871  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2872  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2873  if (!descriptor.m_CifgEnabled)
2874  {
2875  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2876  reasonIfUnsupported,
2877  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2878  "is not a supported type.");
2879  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2880  reasonIfUnsupported,
2881  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2882  "is not a supported type.");
2883  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2884  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2885  if (descriptor.m_PeepholeEnabled)
2886  {
2887  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2888  reasonIfUnsupported,
2889  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2890  "is not a supported type.");
2891  }
2892  }
2893  if (descriptor.m_PeepholeEnabled)
2894  {
2895  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2896  reasonIfUnsupported,
2897  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2898  "is not a supported type.");
2899  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2900  reasonIfUnsupported,
2901  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2902  "is not a supported type.");
2903  }
2904  if (descriptor.m_ProjectionEnabled)
2905  {
2906  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2907  reasonIfUnsupported,
2908  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2909  "is not a supported type.");
2910  if (paramsInfo.m_ProjectionBias != nullptr)
2911  {
2912  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2913  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2914  "are mismatched");
2915  }
2916  }
2917  if (descriptor.m_LayerNormEnabled)
2918  {
2919  if (!descriptor.m_CifgEnabled)
2920  {
2921  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2922  reasonIfUnsupported,
2923  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2924  "is not a supported type.");
2925  }
2926  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2927  reasonIfUnsupported,
2928  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2929  "is not a supported type.");
2930  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2931  reasonIfUnsupported,
2932  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2933  "is not a supported type.");
2934  supported &amp;= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2935  reasonIfUnsupported,
2936  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2937  "is not a supported type.");
2938  }
2939 
2940  return supported;
2941 }
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
const TensorInfo & output
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
void IgnoreUnused(Ts &&...)
const TensorInfo const ActivationDescriptor & descriptor
const TensorInfo & outputStateIn
const TensorInfo const TensorInfo & cellStateIn
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
bool CheckSupportRule(F rule, Optional< std::string &> reasonIfUnsupported, const char *reason)

The documentation for this class was generated from the following files: