ArmNN 23.05 — RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport: RefLayerSupport derives from LayerSupportBase, which in turn derives from ILayerSupport.

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from ILayerSupport
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsActivationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsAdditionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsArgMinMaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchToSpaceNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsCastSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsChannelShuffleSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsComparisonSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp32ToFp16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDebugSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthToSpaceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDequantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDivisionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsElementwiseUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFakeQuantizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFillSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFloorSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFullyConnectedSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsGatherSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInputSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInstanceNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsL2NormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalBinarySupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMaximumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMeanSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemCopySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemImportSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMergeSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMinimumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMultiplicationSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsOutputSupported(const TensorInfo &output
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPadSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPermuteSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreCompiledSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreluSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizedLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsRankSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReduceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReshapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsResizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsShapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSliceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToBatchNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToDepthSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSplitterSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSubtractionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSwitchSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input
 

Additional Inherited Members

- Public Attributes inherited from ILayerSupport
const TensorInfooutput
 
const TensorInfo const ActivationDescriptordescriptor
 
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoinput1
 
const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ArgMinMaxDescriptordescriptor
 
const TensorInfo const ArgMinMaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfomean
 
const TensorInfo const TensorInfo const TensorInfovar
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfobeta
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfogamma
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const BatchToSpaceNdDescriptordescriptor
 
const TensorInfo const BatchToSpaceNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ChannelShuffleDescriptordescriptor
 
const TensorInfo const ChannelShuffleDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const ComparisonDescriptordescriptor
 
const TensorInfo const TensorInfo const ComparisonDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConcatSupported(const std Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution2dDescriptordescriptor
 
const TensorInfo const Convolution2dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution3dDescriptordescriptor
 
const TensorInfo const Convolution3dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthToSpaceDescriptordescriptor
 
const TensorInfo const DepthToSpaceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthwiseConvolution2dDescriptordescriptor
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoscores
 
const TensorInfo const TensorInfoanchors
 
const TensorInfo const TensorInfo const TensorInfodetectionBoxes
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionClasses
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionScores
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfonumDetections
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ElementwiseUnaryDescriptordescriptor
 
const TensorInfo const ElementwiseUnaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const FakeQuantizationDescriptordescriptor
 
const FakeQuantizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const FillDescriptordescriptor
 
const TensorInfo const FillDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoweights
 
const TensorInfo const TensorInfo const TensorInfobiases
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const GatherDescriptordescriptor
 
const TensorInfo const TensorInfo const GatherDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const InstanceNormalizationDescriptordescriptor
 
const TensorInfo const InstanceNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const L2NormalizationDescriptordescriptor
 
const TensorInfo const L2NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptordescriptor
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const LogSoftmaxDescriptordescriptor
 
const TensorInfo const LogSoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfooutputStateIn
 
const TensorInfo const TensorInfocellStateIn
 
const TensorInfo const TensorInfo const TensorInfoscratchBuffer
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const MeanDescriptordescriptor
 
const TensorInfo const MeanDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoouput
 
const TensorInfo const NormalizationDescriptordescriptor
 
const TensorInfo const NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PadDescriptordescriptor
 
const TensorInfo const PadDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PermuteDescriptordescriptor
 
const TensorInfo const PermuteDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling2dDescriptordescriptor
 
const TensorInfo const Pooling2dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling3dDescriptordescriptor
 
const TensorInfo const Pooling3dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const PreCompiledDescriptordescriptor
 
const PreCompiledDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoalpha
 
const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReduceDescriptordescriptor
 
const TensorInfo const ReduceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReshapeDescriptordescriptor
 
const TensorInfo const ReshapeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ResizeDescriptordescriptor
 
const TensorInfo const ResizeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SliceDescriptordescriptor
 
const TensorInfo const SliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SoftmaxDescriptordescriptor
 
const TensorInfo const SoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToBatchNdDescriptordescriptor
 
const TensorInfo const SpaceToBatchNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToDepthDescriptordescriptor
 
const TensorInfo const SpaceToDepthDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptordescriptor
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfooutput
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptordescriptor
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfooutput0
 
const TensorInfo const TensorInfo const TensorInfooutput1
 
const TensorInfo const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeConvolution2dDescriptordescriptor
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeDescriptordescriptor
 
const TensorInfo const TransposeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ActivationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 538 of file RefLayerSupport.cpp.

542 {
543  bool supported = true;
544 
545  // Define supported types.
546  std::array<DataType,6> supportedTypes = {
552  };
553 
554  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
555  "Reference activation: input type not supported.");
556 
557  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
558  "Reference activation: output type not supported.");
559 
560  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
561  "Reference activation: input and output types mismatched.");
562 
563  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
564  "Reference activation: input and output shapes are of different rank.");
565 
566 
567  struct ActivationFunctionSupported : public Rule
568  {
569  ActivationFunctionSupported(const ActivationDescriptor& desc)
570  {
571  switch(desc.m_Function)
572  {
585  {
586  m_Res = true;
587  break;
588  }
589  default:
590  {
591  m_Res = false;
592  break;
593  }
594  }
595  }
596  };
597 
598  // Function is supported
599  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
600  "Reference activation: function not supported.");
601 
602  return supported;
603 }

References armnn::Abs, armnn::BoundedReLu, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 605 of file RefLayerSupport.cpp.

609 {
610  bool supported = true;
611 
612  std::array<DataType,7> supportedTypes = {
619  };
620 
621  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
622  "Reference addition: input 0 is not a supported type.");
623 
624  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
625  "Reference addition: input 1 is not a supported type.");
626 
627  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
628  "Reference addition: output is not a supported type.");
629 
630  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
631  "Reference addition: input 0 and Input 1 types are mismatched");
632 
633  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
634  "Reference addition: input and output types are mismatched");
635 
636  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
637  "Reference addition: shapes are not suitable for implicit broadcast.");
638 
639  return supported;
640 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ArgMinMaxDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 642 of file RefLayerSupport.cpp.

645 {
647 
648  std::array<DataType, 8> supportedInputTypes =
649  {
657  };
658 
659  std::array<DataType,2> supportedOutputTypes = {
662  };
663 
664  bool supported = true;
665 
666  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
667  "Reference ArgMinMax: input is not a supported type.");
668  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
669  "Reference ArgMinMax: output type not supported");
670 
671  return supported;
672 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo &  inputX,
const TensorInfo &  inputY,
const TensorInfo &  output,
const BatchMatMulDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 674 of file RefLayerSupport.cpp.

679 {
681 
682  std::array<DataType, 6> supportedTypes =
683  {
689  };
690 
691  bool supported = true;
692 
693  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
694  "Reference batch matrix multiplication: input X is not a supported type");
695 
696  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
697  "Reference batch matrix multiplication: input Y is not a supported type");
698 
699  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
700  "Reference batch matrix multiplication: output is not a supported type");
701 
702  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
703  "Reference batch matrix multiplication: input X and input Y types are mismatched");
704 
705  supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
706  "Reference batch matrix multiplication: inputs and output types are mismatched");
707 
708  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputX, 2),
710  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
711 
712  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputY, 2),
714  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
715 
716  return supported;
717 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  mean,
const TensorInfo &  var,
const TensorInfo &  beta,
const TensorInfo &  gamma,
const BatchNormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 719 of file RefLayerSupport.cpp.

727 {
729 
730  std::array<DataType, 6> supportedTypes =
731  {
737  };
738 
739  bool supported = true;
740 
741  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
742  "Reference batch normalization: input is not a supported type.");
743 
744  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
745  "Reference batch normalization: output is not a supported type.");
746 
747  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
748  "Reference batch normalization: input and output types are mismatched");
749 
750  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
751  "Reference batch normalization: mean is not a supported type.");
752 
753  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
754  "Reference batch normalization: variance is not a supported type.");
755 
756  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
757  "Reference batch normalization: beta is not a supported type.");
758 
759  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
760  "Reference batch normalization: gamma is not a supported type.");
761 
762  return supported;
763 }

References ILayerSupport::beta, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, ILayerSupport::gamma, armnn::IgnoreUnused(), ILayerSupport::mean, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo & input,
const TensorInfo & output,
const BatchToSpaceNdDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 765 of file RefLayerSupport.cpp.

769 {
771 
772  bool supported = true;
773 
774  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
775  std::string inputTensorStr = "input";
776  std::string outputTensorStr = "output";
777 
778  // Define supported types.
779  std::array<DataType,6> supportedTypes =
780  {
786  };
787 
788  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
789  "Reference BatchToSpaceNd: input type not supported.");
790 
791  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
792  "Reference BatchToSpaceNd: output type not supported.");
793 
794  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
795  "Reference BatchToSpaceNd: input and output types mismatched.");
796 
797  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
799  CreateIncorrectDimensionsErrorMsg(4,
801  batchToSpaceNdLayerStr,
802  outputTensorStr).data());
803 
804  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
806  CreateIncorrectDimensionsErrorMsg(4,
807  input.GetNumDimensions(),
808  batchToSpaceNdLayerStr,
809  inputTensorStr).data());
810 
811  return supported;
812 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 814 of file RefLayerSupport.cpp.

817 {
818  std::array<DataType, 9> supportedInputTypes =
819  {
827  };
828 
829  bool supported = true;
830  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
831  "Reference cast: input is not a supported type");
832 
833 
834  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
835  "Reference cast: output is not a supported type");
836 
837  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
838  "Reference cast: input and output shapes have different number of total elements");
839 
840  return supported;
841 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo & input,
const TensorInfo & output,
const ChannelShuffleDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 843 of file RefLayerSupport.cpp.

847 {
849  bool supported = true;
850 
851  // Define supported output and inputs types.
852  std::array<DataType, 7> supportedTypes =
853  {
860  };
861 
862  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
863  "Reference ChannelShuffle: input is not a supported type.");
864 
865  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
866  "Reference ChannelShuffle: output is not a supported type.");
867 
868  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
869  "Reference ChannelShuffle: input and output types are mismatched.");
870 
871  return supported;
872 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
const ComparisonDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 875 of file RefLayerSupport.cpp.

880 {
882  std::array<DataType, 8> supportedInputTypes =
883  {
891  };
892 
893  bool supported = true;
894  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
895  "Reference comparison: input 0 is not a supported type");
896 
897  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
898  "Reference comparison: input 0 and Input 1 types are mismatched");
899 
901  "Reference comparison: output is not of type Boolean");
902 
903  return supported;
904 }

References armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * >  inputs,
const TensorInfo & output,
const OriginsDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 906 of file RefLayerSupport.cpp.

910 {
912 
913  bool supported = true;
914  std::array<DataType,7> supportedTypes =
915  {
922  };
923 
924  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
925  "Reference concatenation: output type not supported");
926  for (const TensorInfo* input : inputs)
927  {
928  ARMNN_ASSERT(input != nullptr);
929  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
930  "Reference concatenation: input type not supported");
931 
932  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
933  "Reference concatenation: input and output types mismatched.");
934  }
935 
936  return supported;
937 }

References ARMNN_ASSERT, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 939 of file RefLayerSupport.cpp.

941 {
942  std::array<DataType,8> supportedTypes =
943  {
951  };
952 
953  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
954  "Reference constant: output is not a supported type.");
955 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 957 of file RefLayerSupport.cpp.

960 {
962  input.GetDataType(),
963  &TrueFunc<>,
964  &FalseInputFuncF32<>,
965  &FalseFuncU8<>,
966  &FalseFuncI32<>,
967  &FalseFuncU8<>) &&
970  &FalseOutputFuncF16<>,
971  &TrueFunc<>,
972  &FalseFuncU8<>,
973  &FalseFuncI32<>,
974  &FalseFuncU8<>));
975 }

References TensorInfo::GetDataType(), armnn::IsSupportedForDataTypeGeneric(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 977 of file RefLayerSupport.cpp.

980 {
982  input.GetDataType(),
983  &FalseInputFuncF16<>,
984  &TrueFunc<>,
985  &FalseFuncU8<>,
986  &FalseFuncI32<>,
987  &FalseFuncU8<>) &&
990  &TrueFunc<>,
991  &FalseOutputFuncF32<>,
992  &FalseFuncU8<>,
993  &FalseFuncI32<>,
994  &FalseFuncU8<>));
995 }

References TensorInfo::GetDataType(), armnn::IsSupportedForDataTypeGeneric(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 997 of file RefLayerSupport.cpp.

1003 {
1004  bool supported = true;
1005 
1006  // Define supported types.
1007  std::array<DataType,7> supportedTypes =
1008  {
1015  };
1016 
1017  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1018  "Reference Convolution2d: input is not a supported type.");
1019 
1020  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1021  "Reference Convolution2d: output is not a supported type.");
1022 
1023  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1024  "Reference Convolution2d: input and output types mismatched.");
1025 
1026 
1027  const DataType inputType = input.GetDataType();
1028  if (IsQuantized8BitType(inputType))
1029  {
1030  std::array<DataType, 3> supportedWeightTypes =
1031  {
1035  };
1036 
1037  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1038  "Reference Convolution2d: weights type not supported for quantized input.");
1039  }
1040  else
1041  {
1042  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1043  "Reference Convolution2d: weights is not a supported type.");
1044 
1045  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1046  "Reference Convolution2d: input and weights types mismatched.");
1047  }
1048 
1049  if (biases.has_value())
1050  {
1051  std::array<DataType,4> biasesSupportedTypes =
1052  {
1056  };
1057 
1058  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1059  "Reference Convolution2d: biases is not a supported type.");
1060  }
1062 
1063  return supported;
1064 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo & input,
const TensorInfo & output,
const Convolution3dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1066 of file RefLayerSupport.cpp.

1072 {
1073  bool supported = true;
1074 
1075  // Define supported types.
1076  std::array<DataType,7> supportedTypes =
1077  {
1084  };
1085 
1086  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1087  "Reference Convolution3d: input is not a supported type.");
1088 
1089  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1090  "Reference Convolution3d: output is not a supported type.");
1091 
1092  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1093  "Reference Convolution3d: input and output types mismatched.");
1094 
1095  const DataType inputType = input.GetDataType();
1096  if (IsQuantized8BitType(inputType))
1097  {
1098  std::array<DataType, 3> supportedWeightTypes =
1099  {
1103  };
1104 
1105  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1106  "Reference Convolution3d: weights type not supported for quantized input.");
1107  }
1108  else
1109  {
1110  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1111  "Reference Convolution3d: weights is not a supported type.");
1112 
1113  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1114  "Reference Convolution3d: input and weights types mismatched.");
1115  }
1116 
1117  if (biases.has_value())
1118  {
1119  std::array<DataType,4> biasesSupportedTypes =
1120  {
1124  };
1125 
1126  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1127  "Reference Convolution3d: biases is not a supported type.");
1128  }
1130 
1131  return supported;
1132 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1134 of file RefLayerSupport.cpp.

1137 {
1138  bool supported = true;
1139 
1140  std::array<DataType, 8> supportedTypes =
1141  {
1150  };
1151 
1152  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1153  "Reference for Debug layer: input type not supported");
1154 
1155  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1156  "Reference for Debug layer: output type not supported");
1157 
1158  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1159  "Reference for Debug layer: input and output types are mismatched");
1160 
1161  return supported;
1162 }

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthToSpaceDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1164 of file RefLayerSupport.cpp.

1168 {
1170  bool supported = true;
1171 
1172  std::array<DataType,6> supportedTypes =
1173  {
1179  };
1180 
1181  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1182  "Reference DepthToSpace: input type not supported");
1183 
1184  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1185  "Reference DepthToSpace: output type not supported");
1186 
1187  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1188  "Reference DepthToSpace: input and output types are mismatched");
1189 
1190  return supported;
1191 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1193 of file RefLayerSupport.cpp.

1199 {
1201  bool supported = true;
1202 
1203  // Define supported types.
1204  std::array<DataType,7> supportedTypes =
1205  {
1212  };
1213 
1214  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1215  "Reference DepthwiseConvolution2d: input is not a supported type.");
1216 
1217  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1218  "Reference DepthwiseConvolution2d: output is not a supported type.");
1219 
1220  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1221  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1222 
1223  const DataType inputType = input.GetDataType();
1224  if (IsQuantized8BitType(inputType))
1225  {
1226  std::array<DataType, 3> supportedWeightTypes =
1227  {
1231  };
1232 
1233  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1234  "Reference DepthwiseConvolution2d: weights type not supported for "
1235  "quantized input.");
1236  }
1237  else
1238  {
1239  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1240  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1241 
1242  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1243  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1244  }
1245 
1246  if (biases.has_value())
1247  {
1248  std::array<DataType,4> biasesSupportedTypes =
1249  {
1253  };
1254  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1255  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1256  }
1257 
1258  return supported;
1259 
1260 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1262 of file RefLayerSupport.cpp.

1265 {
1266  bool supported = true;
1267 
1268  std::array<DataType,5> supportedInputTypes = {
1274  };
1275 
1276  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1277  "Reference for Dequantize layer: input type not supported.");
1278 
1279  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1280  "Reference for Dequantize layer: per-axis quantized input not supported.");
1281 
1282  std::array<DataType,3> supportedOutputTypes = {
1285  };
1286 
1287  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1288  "Reference for Dequantize layer: output type not supported.");
1289 
1290  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1291  "Reference for Dequantize layer: input/output shapes have different num total "
1292  "elements.");
1293 
1294  return supported;
1295 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo & boxEncodings,
const TensorInfo & scores,
const TensorInfo & anchors,
const TensorInfo & detectionBoxes,
const TensorInfo & detectionClasses,
const TensorInfo & detectionScores,
const TensorInfo & numDetections,
const DetectionPostProcessDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1297 of file RefLayerSupport.cpp.

1306 {
1308 
1309  bool supported = true;
1310 
1311  std::array<DataType,6> supportedInputTypes =
1312  {
1318  };
1319 
1320  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1321  "Reference DetectionPostProcess: input 0 is not a supported type.");
1322 
1323  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1324  "Reference DetectionPostProcess: input 1 is not a supported type.");
1325 
1326  return supported;
1327 }

References ILayerSupport::anchors, armnn::CheckSupportRule(), ILayerSupport::descriptor, ILayerSupport::detectionBoxes, ILayerSupport::detectionClasses, ILayerSupport::detectionScores, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::numDetections, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and ILayerSupport::scores.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo & input,
const TensorInfo & output,
const DepthwiseConvolution2dDescriptor & descriptor,
const TensorInfo & weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo & input0,
const TensorInfo & input1,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1339 of file RefLayerSupport.cpp.

1343 {
1344  bool supported = true;
1345 
1346  std::array<DataType,7> supportedTypes = {
1353  };
1354 
1355  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1356  "Reference division: input 0 is not a supported type.");
1357 
1358  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1359  "Reference division: input 1 is not a supported type.");
1360 
1361  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1362  "Reference division: output is not a supported type.");
1363 
1364  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1365  "Reference division: input 0 and Input 1 types are mismatched");
1366 
1367  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1368  "Reference division: input and output types are mismatched");
1369 
1370  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1371  "Reference division: shapes are not suitable for implicit broadcast.");
1372 
1373  return supported;
1374 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo & input,
const TensorInfo & output,
const ElementwiseUnaryDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1376 of file RefLayerSupport.cpp.

1380 {
1382 
1383  std::array<DataType, 7> supportedTypes =
1384  {
1391  };
1392 
1393  std::array<DataType, 1> logicalSupportedTypes =
1394  {
1396  };
1397 
1398  bool supported = true;
1399 
1400  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1401  {
1402  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1403  "Reference elementwise unary: input type not supported");
1404 
1405  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1406  "Reference elementwise unary: output type not supported");
1407  }
1408  else
1409  {
1410  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1411  "Reference elementwise unary: input type not supported");
1412 
1413  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1414  "Reference elementwise unary: output type not supported");
1415  }
1416 
1417  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1418  "Reference elementwise unary: input and output types not matching");
1419 
1420  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1421  "Reference elementwise unary: input and output shapes"
1422  "have different number of total elements");
1423 
1424  return supported;
1425 }

References armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo & input,
const FakeQuantizationDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1427 of file RefLayerSupport.cpp.

1430 {
1432  bool supported = true;
1433 
1434  std::array<DataType,1> supportedTypes =
1435  {
1437  };
1438 
1439  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1440  "Reference fake quantization: input type not supported.");
1441 
1442  return supported;
1443 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo & input,
const TensorInfo & output,
const FillDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override virtual

Reimplemented from LayerSupportBase.

Definition at line 1445 of file RefLayerSupport.cpp.

1449 {
1452 
1453  bool supported = true;
1454 
1455  std::array<DataType,3> supportedTypes =
1456  {
1460  };
1461 
1462  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1463  "Reference Fill: input type not supported.");
1464 
1465  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1466  "Reference Fill: output type not supported.");
1467  return supported;
1468 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo & input,
const TensorInfo & output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1470 of file RefLayerSupport.cpp.

1473 {
1475  bool supported = true;
1476 
1477  std::array<DataType,3> supportedTypes =
1478  {
1481  };
1482 
1483  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1484  "Reference Floor: input type not supported.");
1485 
1486  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1487  "Reference Floor: output type not supported.");
1488 
1489  return supported;
1490 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo & input,
const TensorInfo & output,
const TensorInfo & weights,
const TensorInfo & biases,
const FullyConnectedDescriptor & descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1492 of file RefLayerSupport.cpp.

1498 {
1499  bool supported = true;
1500 
1501  // Define supported types.
1502  std::array<DataType,6> supportedTypes =
1503  {
1509  };
1510 
1511  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1512  "Reference Fully Connected: input type not supported.");
1513 
1514  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1515  "Reference Fully Connected: output type not supported.");
1516 
1517  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1518  "Reference Fully Connected: weights type not supported.");
1519 
1520  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1521  "Reference Fully Connected: input and output types mismatched.");
1522 
1523  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1524  "Reference Fully Connected: weights is not a supported type.");
1525 
1526  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1527  "Reference Fully Connected: input and weights types mismatched.");
1528 
1529  if (descriptor.m_BiasEnabled)
1530  {
1531  // Defined supported types for bias
1532  std::array<DataType, 5>
1533  supportedBiasTypes =
1534  {
1539  };
1540 
1541  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1542  "Reference Fully Connected: bias type not supported.");
1543 
1544  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1545  "Reference Fully Connected: bias and weight types mismatch.");
1546 
1547  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1548  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1549 
1550  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1551  "Reference Fully Connected: bias must have 1 dimension.");
1552 
1553  }
1554 
1555  return supported;
1556 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1558 of file RefLayerSupport.cpp.

1562 {
1563  bool supported = true;
1564  std::array<DataType,7> supportedTypes =
1565  {
1572  };
1573 
1574  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1575  "Reference GatherNd: input type not supported");
1576 
1577  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1578  "Reference GatherNd: output type not supported");
1579 
1581  "Reference GatherNd: indices (input1) type not supported");
1582 
1583  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1584  "Reference GatherNd: input and output types not matching");
1585 
1586  return supported;
1587 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1589 of file RefLayerSupport.cpp.

1594 {
1595  bool supported = true;
1596  std::array<DataType,7> supportedTypes =
1597  {
1604  };
1605 
1607  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1608  "Reference Gather: input type not supported");
1609 
1610  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1611  "Reference Gather: output type not supported");
1612 
1614  "Reference Gather: indices (input1) type not supported");
1615 
1616  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1617  "Reference Gather: input and output types not matching");
1618 
1619  return supported;
1620 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1622 of file RefLayerSupport.cpp.

1624 {
1625  return true;
1626 }

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1628 of file RefLayerSupport.cpp.

1632 {
1634  // Define supported types
1635  std::array<DataType, 3> supportedTypes =
1636  {
1639  };
1640 
1641  bool supported = true;
1642 
1643  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1644  "Reference Instance Normalization: input type not supported.");
1645 
1646  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1647  "Reference Instance Normalization: output type not supported.");
1648 
1649  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1650  "Reference Instance Normalization: input and output types mismatched.");
1651 
1652  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1653  "Reference Instance Normalization: input and output shapes have different "
1654  "num total elements.");
1655 
1656  return supported;
1657 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1659 of file RefLayerSupport.cpp.

1663 {
1665  // Define supported types
1666  std::array<DataType, 6> supportedTypes =
1667  {
1673  };
1674 
1675  bool supported = true;
1676 
1677  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1678  "Reference L2normalization: input type not supported.");
1679 
1680  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1681  "Reference L2normalization: output type not supported.");
1682 
1683  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1684  "Reference L2normalization: input and output types mismatched.");
1685 
1686  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1687  "Reference L2normalization: input and output shapes have different "
1688  "num total elements.");
1689 
1690  return supported;
1691 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmInputParamsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
104  return IsComparisonSupported(infos[0],
105  infos[1],
106  infos[2],
107  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
109  case LayerType::Concat:
110  {
111  std::vector<const TensorInfo*> inputInfos;
112  for (uint32_t i = 0; i < (infos.size() - 1); i++)
113  {
114  inputInfos.push_back(&infos[i]);
115  }
116  return IsConcatSupported(inputInfos,
117  infos[infos.size() - 1],
118  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
120  }
121  case LayerType::Constant:
122  return IsConstantSupported(infos[0], reasonIfUnsupported);
124  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
126  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
128  {
129  if (infos.size() != 4)
130  {
131  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
132  "TensorInfos should be of format: {input, output, weights, biases}.");
133  }
134 
135  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
136  if (infos[3] == TensorInfo())
137  {
138  return IsConvolution2dSupported(infos[0],
139  infos[1],
140  desc,
141  infos[2],
142  EmptyOptional(),
144  }
145  else
146  {
147  return IsConvolution2dSupported(infos[0],
148  infos[1],
149  desc,
150  infos[2],
151  infos[3],
153  }
154  }
156  return IsDepthToSpaceSupported(infos[0],
157  infos[1],
158  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
161  {
162  if (infos.size() != 4)
163  {
164  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
165  "TensorInfos should be of format: {input, output, weights, biases}.");
166  }
167 
168  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
169  if (infos[3] == TensorInfo())
170  {
171  return IsDepthwiseConvolutionSupported(infos[0],
172  infos[1],
173  desc,
174  infos[2],
175  EmptyOptional(),
177  }
178  else
179  {
180  return IsDepthwiseConvolutionSupported(infos[0],
181  infos[1],
182  desc,
183  infos[2],
184  infos[3],
186  }
187  }
189  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
190  case LayerType::Division:
191  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
193  {
194  std::array<DataType, 7> supportedTypes =
195  {
202  };
203 
204  bool supported = true;
205  supported &= CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
206  "Reference elementwise unary: input type not supported");
207 
208  supported &= CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
209  "Reference elementwise unary: input type not supported");
210 
211  supported &= CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
212  "Reference elementwise unary: output type not supported");
213 
214  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
215  "Reference elementwise unary: input types not matching");
216 
217  supported &= CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
218  "Reference elementwise unary: input and output types not matching");
219 
220  return supported;
221  }
223  return IsElementwiseUnarySupported(infos[0],
224  infos[1],
225  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
227  case LayerType::Fill:
228  return IsFillSupported(infos[0],
229  infos[1],
230  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
232  case LayerType::Floor:
233  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
235  return IsFullyConnectedSupported(infos[0],
236  infos[1],
237  infos[2],
238  infos[3],
239  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
241  case LayerType::Gather:
242  return IsGatherSupported(infos[0],
243  infos[1],
244  infos[2],
245  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
247  case LayerType::GatherNd:
248  return IsGatherNdSupported(infos[0],
249  infos[1],
250  infos[2],
252  case LayerType::Input:
253  return IsInputSupported(infos[0], reasonIfUnsupported);
255  return IsInstanceNormalizationSupported(infos[0],
256  infos[1],
257  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
258  (&descriptor)),
261  return IsL2NormalizationSupported(infos[0],
262  infos[1],
263  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
266  return IsLogicalBinarySupported(infos[0],
267  infos[1],
268  infos[2],
269  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
272  return IsLogSoftmaxSupported(infos[0],
273  infos[1],
274  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
276  case LayerType::Lstm:
277  return IsLstmSupported(infos[0],
278  infos[1],
279  infos[2],
280  infos[3],
281  infos[4],
282  infos[5],
283  infos[6],
284  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
285  lstmParamsInfo.value(),
287  case LayerType::QLstm:
288  return IsQLstmSupported(infos[0],
289  infos[1],
290  infos[2],
291  infos[3],
292  infos[4],
293  infos[5],
294  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
295  lstmParamsInfo.value(),
297  case LayerType::Maximum:
298  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
299  case LayerType::Mean:
300  return IsMeanSupported(infos[0],
301  infos[1],
302  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
304  case LayerType::Minimum:
305  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
307  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
309  return IsNormalizationSupported(infos[0],
310  infos[1],
311  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
313  case LayerType::Output:
314  return IsOutputSupported(infos[0], reasonIfUnsupported);
315  case LayerType::Pad:
316  return IsPadSupported(infos[0],
317  infos[1],
318  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
320  case LayerType::Permute:
321  return IsPermuteSupported(infos[0],
322  infos[1],
323  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
326  return IsPooling2dSupported(infos[0],
327  infos[1],
328  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
330  case LayerType::Prelu:
331  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
332  case LayerType::Quantize:
333  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
334  case LayerType::Reshape:
335  return IsReshapeSupported(infos[0],
336  infos[1],
337  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
339  case LayerType::Resize:
340  return IsResizeSupported(infos[0],
341  infos[1],
342  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
344  case LayerType::Reduce:
345  return IsReduceSupported(infos[0],
346  infos[1],
347  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
349  case LayerType::Slice:
350  return IsSliceSupported(infos[0],
351  infos[1],
352  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
354  case LayerType::Softmax:
355  return IsSoftmaxSupported(infos[0],
356  infos[1],
357  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
360  return IsSpaceToBatchNdSupported(infos[0],
361  infos[1],
362  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
365  return IsSpaceToDepthSupported(infos[0],
366  infos[1],
367  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
369  case LayerType::Splitter:
370  {
371  std::vector<TensorInfo> outputInfos;
372  for (uint32_t i = 1; i < infos.size(); i++)
373  {
374  outputInfos.push_back(infos[i]);
375  }
376  return IsSplitterSupported(infos[0],
377  {outputInfos.begin(), outputInfos.end()},
378  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
380  }
381  case LayerType::Stack:
382  {
383  std::vector<const TensorInfo*> inputInfos;
384  for (uint32_t i = 0; i < infos.size() - 1; i++)
385  {
386  inputInfos.push_back(&infos[i]);
387  }
388  return IsStackSupported(inputInfos,
389  infos[infos.size() - 1],
390  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
392  }
394  return IsStridedSliceSupported(infos[0],
395  infos[1],
396  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
399  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
401  return IsTransposeSupported(infos[0],
402  infos[1],
403  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
406  {
407  if (infos.size() != 4)
408  {
409  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
410  "TensorInfos should be of format: {input, output, weights, biases}.");
411  }
412 
413  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
414  if (infos[3] == TensorInfo())
415  {
416  return IsTransposeConvolution2dSupported(infos[0],
417  infos[1],
418  desc,
419  infos[2],
420  EmptyOptional(),
422  }
423  else
424  {
425  return IsTransposeConvolution2dSupported(infos[0],
426  infos[1],
427  desc,
428  infos[2],
429  infos[3],
431  }
432  }
433  case LayerType::Cast:
434  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
436  return IsChannelShuffleSupported(infos[0],
437  infos[1],
438  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
441  {
442  if (infos.size() != 4)
443  {
444  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
445  "TensorInfos should be of format: {input, output, weights, biases}.");
446  }
447 
448  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
449  if (infos[3] == TensorInfo())
450  {
451  return IsConvolution3dSupported(infos[0],
452  infos[1],
453  desc,
454  infos[2],
455  EmptyOptional(),
457  }
458  else
459  {
460  return IsConvolution3dSupported(infos[0],
461  infos[1],
462  desc,
463  infos[2],
464  infos[3],
466  }
467  }
468  case LayerType::Debug:
469  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
471  return IsDetectionPostProcessSupported(infos[0],
472  infos[1],
473  infos[2],
474  infos[3],
475  infos[4],
476  infos[5],
477  infos[6],
478  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
479  (&descriptor)),
482  return IsFakeQuantizationSupported(infos[0],
483  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
485  case LayerType::MemCopy:
486  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
487  case LayerType::Rank:
488  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
489  case LayerType::Shape:
490  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
492  {
493  if (infos.size() != 6)
494  {
495  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
496  "should be of format: {input, outputStateIn, cellStateIn, "
497  "hiddenStateOutputVal, cellStateOutputVal, output}");
498  }
499  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
501  infos[1],
502  infos[2],
503  infos[3],
504  infos[4],
505  infos[5],
506  desc,
507  lstmParamsInfo.value(),
509  }
511  return IsPooling3dSupported(infos[0],
512  infos[1],
513  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
515  case LayerType::Map:
516  return true;
517  case LayerType::Unmap:
518  return true;
521  case LayerType::Merge:
522  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
525  infos[1],
526  infos[2],
527  infos[3],
528  infos[4],
529  quantizedLstmInputParamsInfo.value(),
531  default:
532  // layers not supported in neon by default:
533  // precompiled, standin, switch
534  return false;
535  }
536 }

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::CheckSupportRule(), armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, ILayerSupport::descriptor, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Float16, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchMatMulSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherNdSupported(), RefLayerSupport::IsGatherSupported(), RefLayerSupport::IsInputSupported(), 
RefLayerSupport::IsInstanceNormalizationSupported(), RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QLstm, armnn::QSymmS16, armnn::Quantize, armnn::QuantizedLstm, 
armnn::Rank, ILayerSupport::reasonIfUnsupported, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const
override

Definition at line 1693 of file RefLayerSupport.cpp.

1698 {
1700 
1701  std::array<DataType, 1> supportedTypes =
1702  {
1704  };
1705 
1706  bool supported = true;
1707  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1708  "Reference LogicalBinary: input 0 type not supported");
1709  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1710  "Reference LogicalBinary: input 1 type not supported");
1711 
1712  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1713  "Reference LogicalBinary: input and output types do not match");
1714 
1715  return supported;
1716 }

References armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::IgnoreUnused(), ILayerSupport::input1, ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const
override

Definition at line 1718 of file RefLayerSupport.cpp.

1722 {
1724 
1725  std::array<DataType, 3> supportedTypes =
1726  {
1729  };
1730 
1731  bool supported = true;
1732  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1733  "Reference LogSoftmax: input type not supported");
1734 
1735  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1736  "Reference LogSoftmax: output type not supported");
1737 
1738  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1739  "Reference LogSoftmax: input and output types do not match");
1740 
1741  return supported;
1742 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1744 of file RefLayerSupport.cpp.

1754 {
1757 
1758  bool supported = true;
1759 
1760  std::array<DataType,3> supportedTypes = {
1763  };
1764 
1765  // check inputs and outputs
1766  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1767  "Reference Lstm: input is not a supported type.");
1768  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1769  "Reference Lstm: input and outputStateIn types are mismatched");
1770  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1771  "Reference Lstm: input and cellStateIn types are mismatched");
1772  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1773  "Reference Lstm: input and scratchBuffer types are mismatched");
1774  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1775  "Reference Lstm: input and outputStateOut types are mismatched");
1776  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1777  "Reference Lstm: input and cellStateOut types are mismatched");
1778 
1779  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1780  "Reference Lstm: input and output types are mismatched");
1781  // check layer parameters
1782  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1783  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1784  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1785  "Reference Lstm: input and InputToCellWeights types are mismatched");
1786  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1787  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1788  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1789  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1790  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1791  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1792  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1793  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1794  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1795  "Reference Lstm: input and ForgetGateBias types are mismatched");
1796  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1797  "Reference Lstm: input and CellBias types are mismatched");
1798  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1799  "Reference Lstm: input and OutputGateBias types are mismatched");
1800  if (!descriptor.m_CifgEnabled)
1801  {
1802  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1803  "Reference Lstm: input and InputToInputWeights types are mismatched");
1804  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1806  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1807  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1808  "Reference Lstm: input and InputGateBias types are mismatched");
1809  if (descriptor.m_PeepholeEnabled)
1810  {
1811  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1813  "Reference Lstm: input and CellToInputWeights types are mismatched");
1814  }
1815  }
1816  if (descriptor.m_PeepholeEnabled)
1817  {
1818  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1819  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1820  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1821  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1822  }
1823  if (descriptor.m_ProjectionEnabled)
1824  {
1825  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1826  "Reference Lstm: input and mProjectionWeights types are mismatched");
1827  if (paramsInfo.m_ProjectionBias != nullptr)
1828  {
1829  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1830  "Reference Lstm: input and ProjectionBias types are mismatched");
1831  }
1832  }
1833  if (descriptor.m_LayerNormEnabled)
1834  {
1835  if (!descriptor.m_CifgEnabled)
1836  {
1837  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1839  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1840  }
1841  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1843  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1844  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1846  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1847  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1849  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1850  }
1851 
1852  return supported;
1853 }

References ILayerSupport::cellStateIn, ILayerSupport::cellStateOut, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputStateIn, ILayerSupport::outputStateOut, ILayerSupport::paramsInfo, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and ILayerSupport::scratchBuffer.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo &input0,
const TensorInfo &input1,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1855 of file RefLayerSupport.cpp.

1859 {
1860  bool supported = true;
1861 
1862  std::array<DataType,7> supportedTypes = {
1869  };
1870 
1871  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1872  "Reference maximum: input 0 is not a supported type.");
1873 
1874  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1875  "Reference maximum: input 1 is not a supported type.");
1876 
1877  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1878  "Reference maximum: output is not a supported type.");
1879 
1880  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1881  "Reference maximum: input 0 and Input 1 types are mismatched");
1882 
1883  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1884  "Reference maximum: input and output types are mismatched");
1885 
1886  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1887  "Reference maximum: shapes are not suitable for implicit broadcast.");
1888 
1889  return supported;
1890 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo &input,
const TensorInfo &output,
const MeanDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1892 of file RefLayerSupport.cpp.

1896 {
1897  bool supported = true;
1898  std::string meanLayerStr = "Mean";
1899  std::string outputTensorStr = "output";
1900 
1901  std::array<DataType,6> supportedTypes =
1902  {
1908  };
1909 
1910  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1911  "Reference Mean: input type not supported.");
1912 
1913  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1914  "Reference Mean: input and output types are mismatched");
1915 
1916  if (descriptor.m_KeepDims)
1917  {
1918  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1920  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1922  meanLayerStr, outputTensorStr).data());
1923  }
1924  else if (descriptor.m_Axis.empty())
1925  {
1926  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1928  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1929  meanLayerStr, outputTensorStr).data());
1930  }
1931  else
1932  {
1933  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1934 
1935  if (outputDim > 0)
1936  {
1937  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1939  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1940  meanLayerStr, outputTensorStr).data());
1941  }
1942  else
1943  {
1944  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1946  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1947  meanLayerStr, outputTensorStr).data());
1948  }
1949  }
1950 
1951  return supported;
1952 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo &input,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1954 of file RefLayerSupport.cpp.

1957 {
1958  bool supported = true;
1959 
1960  std::array<DataType,7> supportedTypes =
1961  {
1969  };
1970 
1971  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1972  "Reference MemCopy: input type not supported");
1973 
1974  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1975  "Reference MemCopy: output type not supported");
1976 
1977  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1978  "Reference MemCopy: input and output types are mismatched");
1979 
1980  return supported;
1981 }

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo &input0,
const TensorInfo &input1,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1983 of file RefLayerSupport.cpp.

1987 {
1988  bool supported = true;
1989 
1990  std::array<DataType,7> supportedTypes = {
1997  };
1998 
1999  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2000  "Reference minimum: input 0 is not a supported type.");
2001 
2002  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2003  "Reference minimum: input 1 is not a supported type.");
2004 
2005  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2006  "Reference minimum: output is not a supported type.");
2007 
2008  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2009  "Reference minimum: input 0 and Input 1 types are mismatched");
2010 
2011  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2012  "Reference minimum: input and output types are mismatched");
2013 
2014  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2015  "Reference minimum: shapes are not suitable for implicit broadcast.");
2016 
2017  return supported;
2018 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo &input0,
const TensorInfo &input1,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2020 of file RefLayerSupport.cpp.

2024 {
2025  bool supported = true;
2026 
2027  std::array<DataType,7> supportedTypes = {
2034  };
2035 
2036  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2037  "Reference multiplication: input 0 is not a supported type.");
2038 
2039  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2040  "Reference multiplication: input 1 is not a supported type.");
2041 
2042  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2043  "Reference multiplication: output is not a supported type.");
2044 
2045  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2046  "Reference multiplication: input 0 and Input 1 types are mismatched");
2047 
2048  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2049  "Reference multiplication: input and output types are mismatched");
2050 
2051  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2052  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2053 
2054  return supported;
2055 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo &input,
const TensorInfo &output,
const NormalizationDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2057 of file RefLayerSupport.cpp.

2061 {
2063 
2064  // Define supported types
2065  std::array<DataType, 6> supportedTypes =
2066  {
2072  };
2073 
2074  bool supported = true;
2075 
2076  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2077  "Reference normalization: input type not supported.");
2078 
2079  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2080  "Reference normalization: output type not supported.");
2081 
2082  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2083  "Reference normalization: input and output shapes have different "
2084  "num total elements.");
2085 
2086  return supported;
2087 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2089 of file RefLayerSupport.cpp.

2091 {
2092  return true;
2093 }

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo &input,
const TensorInfo &output,
const PadDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2095 of file RefLayerSupport.cpp.

2099 {
2101  bool supported = true;
2102 
2103  // Define supported output and inputs types.
2104  std::array<DataType,6> supportedTypes =
2105  {
2111  };
2112 
2113  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2114  "Reference pad: input is not a supported type.");
2115 
2116  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2117  "Reference pad: output is not a supported type.");
2118 
2119  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2120  "Reference pad: input and output types are mismatched.");
2121 
2122  return supported;
2123 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo &input,
const TensorInfo &output,
const PermuteDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2125 of file RefLayerSupport.cpp.

2129 {
2131  bool supported = true;
2132 
2133  // Define supported output and inputs types.
2134  std::array<DataType, 6> supportedTypes =
2135  {
2142  };
2143 
2144  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2145  "Reference permute: input is not a supported type.");
2146 
2147  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2148  "Reference permute: output is not a supported type.");
2149 
2150  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2151  "Reference permute: input and output types are mismatched.");
2152 
2153  return supported;
2154 }

References armnn::BFloat16, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo &input,
const TensorInfo &output,
const Pooling2dDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2156 of file RefLayerSupport.cpp.

2160 {
2162  bool supported = true;
2163 
2164  // Define supported output and inputs types.
2165  std::array<DataType,6> supportedTypes =
2166  {
2172  };
2173 
2174  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2175  "Reference poolind2d: input is not a supported type.");
2176 
2177  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2178  "Reference poolind2d: output is not a supported type.");
2179 
2180  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2181  "Reference poolind2d: input and output types are mismatched.");
2182 
2183  return supported;
2184 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo &input,
const TensorInfo &output,
const Pooling3dDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2186 of file RefLayerSupport.cpp.

2190 {
2192  bool supported = true;
2193 
2194  // Define supported output and inputs types.
2195  std::array<DataType,6> supportedTypes =
2196  {
2202  };
2203 
2204  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2205  "Reference poolind3d: input is not a supported type.");
2206 
2207  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2208  "Reference poolind3d: output is not a supported type.");
2209 
2210  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2211  "Reference poolind3d: input and output types are mismatched.");
2212 
2213  return supported;
2214 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo &input,
const TensorInfo &alpha,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2634 of file RefLayerSupport.cpp.

2638 {
2639  bool supported = true;
2640 
2641  std::array<DataType, 6> supportedTypes
2642  {
2648  };
2649 
2650  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2651  "PReLU: input is not a supported type.");
2652 
2653  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2654  "PReLU: alpha is not a supported type.");
2655 
2656  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2657  "PReLU: output is not a supported type.");
2658 
2659  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2660  "PReLU: input, alpha and output types are mismatched");
2661 
2662  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2663  "PReLU: shapes are not suitable for implicit broadcast");
2664 
2665  return supported;
2666 }

References ILayerSupport::alpha, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo &input,
const TensorInfo &previousOutputIn,
const TensorInfo &previousCellStateIn,
const TensorInfo &outputStateOut,
const TensorInfo &cellStateOut,
const TensorInfo &output,
const QLstmDescriptor &descriptor,
const LstmInputParamsInfo &paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo &input,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2241 of file RefLayerSupport.cpp.

2244 {
2245  bool supported = true;
2246 
2247  // Define supported input types.
2248  std::array<DataType,7> supportedInputTypes = {
2255  };
2256 
2257  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2258  "Reference quantize: input type not supported.");
2259 
2260  // Define supported output types.
2261  std::array<DataType,4> supportedOutputTypes = {
2266  };
2267  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2268  "Reference quantize: output type not supported.");
2269 
2270  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2271  "Reference quantize: input and output shapes have different num total elements.");
2272 
2273  return supported;
2274 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo &input,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2276 of file RefLayerSupport.cpp.

2279 {
2280  IgnoreUnused(input);
2281  // Define supported output types.
2282  std::array<DataType,1> supportedOutputTypes =
2283  {
2285  };
2286 
2287  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2288  "Reference rank: input type not supported.");
2289 }

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo &input,
const TensorInfo &output,
const ReduceDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2291 of file RefLayerSupport.cpp.

2295 {
2297  bool supported = true;
2298  std::array<DataType,7> supportedTypes =
2299  {
2306  };
2307 
2308  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2309  "Reference Reduce: input type not supported");
2310 
2311  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2312  "Reference Reduce: output type not supported");
2313 
2314  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2315  "Reference Reduce: input and output types not matching");
2316 
2317  return supported;
2318 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo &input,
const TensorInfo &output,
const ReshapeDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2320 of file RefLayerSupport.cpp.

2324 {
2327  // Define supported output types.
2328  std::array<DataType,8> supportedOutputTypes =
2329  {
2338  };
2339 
2340  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2341  "Reference reshape: input type not supported.");
2342 }

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo &input,
const TensorInfo &output,
const ResizeDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2344 of file RefLayerSupport.cpp.

2348 {
2350  bool supported = true;
2351  std::array<DataType,6> supportedTypes =
2352  {
2359  };
2360 
2361  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2362  "Reference Resize: input type not supported");
2363 
2364  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2365  "Reference Resize: output type not supported");
2366 
2367  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2368  "Reference Resize: input and output types not matching");
2369 
2370  return supported;
2371 }

References armnn::BFloat16, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo &input,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2373 of file RefLayerSupport.cpp.

2376 {
2377  IgnoreUnused(input);
2378  bool supported = true;
2379 
2380  std::array<DataType, 1> supportedTypes =
2381  {
2383  };
2384 
2385  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2386  "Reference Shape: output type not supported");
2387 
2388  return supported;
2389 }

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo &input,
const TensorInfo &output,
const SliceDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2391 of file RefLayerSupport.cpp.

2395 {
2397  bool supported = true;
2398 
2399  std::array<DataType, 5> supportedTypes =
2400  {
2405  };
2406 
2407  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2408  "Reference Slice: input type not supported");
2409 
2410  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2411  "Reference Slice: output type not supported");
2412 
2413  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2414  "Reference Slice: input and output types are mismatched");
2415 
2416  return supported;
2417 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo &input,
const TensorInfo &output,
const SoftmaxDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2419 of file RefLayerSupport.cpp.

2423 {
2425  bool supported = true;
2426  std::array<DataType,7> supportedTypes =
2427  {
2434  };
2435 
2436  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2437  "Reference Softmax: output type not supported");
2438 
2439  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2440  "Reference Softmax: input type not supported");
2441 
2442  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2443  "Reference Softmax: input type not supported");
2444 
2445  return supported;
2446 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo &input,
const TensorInfo &output,
const SpaceToBatchNdDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2448 of file RefLayerSupport.cpp.

2452 {
2454  bool supported = true;
2455  std::array<DataType,6> supportedTypes =
2456  {
2462  };
2463 
2464  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2465  "Reference SpaceToBatchNd: input type not supported");
2466 
2467  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2468  "Reference SpaceToBatchNd: output type not supported");
2469 
2470  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2471  "Reference SpaceToBatchNd: input and output types are mismatched");
2472 
2473  return supported;
2474 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo &input,
const TensorInfo &output,
const SpaceToDepthDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2476 of file RefLayerSupport.cpp.

2480 {
2481 
2483  bool supported = true;
2484 
2485  std::array<DataType,6> supportedTypes =
2486  {
2492  };
2493 
2494  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2495  "Reference SpaceToDepth: input type not supported");
2496 
2497  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2498  "Reference SpaceToDepth: output type not supported");
2499 
2500  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2501  "Reference SpaceToDepth: input and output types are mismatched");
2502 
2503  return supported;
2504 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo &input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2506 of file RefLayerSupport.cpp.

2510 {
2512  bool supported = true;
2513  std::array<DataType,6> supportedTypes =
2514  {
2520  };
2521 
2522  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2523  "Reference splitter: output type not supported");
2524  for (const TensorInfo& output : outputs)
2525  {
2526  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2527  "Reference splitter: input type not supported");
2528 
2529  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2530  "Reference splitter: input and output types mismatched.");
2531  }
2532 
2533  return supported;
2534 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputs, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > &  inputs,
const TensorInfo &output,
const StackDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2536 of file RefLayerSupport.cpp.

2540 {
2542 
2543  bool supported = true;
2544  std::array<DataType,7> supportedTypes =
2545  {
2552  };
2553 
2554  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2555  "Reference stack: output type not supported");
2556  for (const TensorInfo* input : inputs)
2557  {
2558  ARMNN_ASSERT(input != nullptr);
2559  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2560  "Reference stack: input type not supported");
2561 
2562  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2563  "Reference stack: input and output types mismatched.");
2564  }
2565 
2566  return supported;
2567 }

References ARMNN_ASSERT, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo &input,
const TensorInfo &output,
const StridedSliceDescriptor &descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2569 of file RefLayerSupport.cpp.

2573 {
2575  bool supported = true;
2576 
2577  std::array<DataType,5> supportedTypes =
2578  {
2583  };
2584 
2585  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2586  "Reference StridedSlice: input type not supported");
2587 
2588  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2589  "Reference StridedSlice: output type not supported");
2590 
2591  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2592  "Reference StridedSlice: input and output types are mismatched");
2593 
2594  return supported;
2595 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &input0,
const TensorInfo &input1,
const TensorInfo &output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2597 of file RefLayerSupport.cpp.

2601 {
2602  bool supported = true;
2603 
2604  std::array<DataType,7> supportedTypes = {
2611  };
2612 
2613  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2614  "Reference subtraction: input 0 is not a supported type.");
2615 
2616  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2617  "Reference subtraction: input 1 is not a supported type.");
2618 
2619  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2620  "Reference subtraction: output is not a supported type.");
2621 
2622  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2623  "Reference subtraction: input 0 and Input 1 types are mismatched");
2624 
2625  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2626  "Reference subtraction: input and output types are mismatched");
2627 
2628  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2629  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2630 
2631  return supported;
2632 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2668 of file RefLayerSupport.cpp.

2674 {
2676  bool supported = true;
2677 
2678  std::array<DataType,7> supportedTypes =
2679  {
2686  };
2687 
2688  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2689  "Reference TransposeConvolution2d: input is not a supported type.");
2690 
2691  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2692  "Reference TransposeConvolution2d: output is not a supported type.");
2693 
2694  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2695  "Reference TransposeConvolution2d: input and output types mismatched.");
2696 
2697 
2698  const DataType inputType = input.GetDataType();
2699  if (IsQuantized8BitType(inputType))
2700  {
2701  std::array<DataType, 3> supportedWeightTypes =
2702  {
2706  };
2707 
2708  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2709  "Reference TransposeConvolution2d: weights type not supported for "
2710  "quantized input.");
2711  }
2712  else
2713  {
2714  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2715  "Reference TransposeConvolution2d: weights is not a supported type.");
2716 
2717  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2718  "Reference TransposeConvolution2d: input and weights types mismatched.");
2719  }
2720 
2721  if (biases.has_value())
2722  {
2723  std::array<DataType,4> biasesSupportedTypes =
2724  {
2728  };
2729  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2730  "Reference TransposeConvolution2d: biases is not a supported type.");
2731  }
2732 
2733  return supported;
2734 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
) const
override

Definition at line 2736 of file RefLayerSupport.cpp.

2740 {
2742  bool supported = true;
2743 
2744  // Define supported output and inputs types.
2745  std::array<DataType, 6> supportedTypes =
2746  {
2753  };
2754 
2755  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2756  "Reference transpose: input is not a supported type.");
2757 
2758  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2759  "Reference transpose: output is not a supported type.");
2760 
2761  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2762  "Reference transpose: input and output types are mismatched.");
2763 
2764  return supported;
2765 }

References armnn::BFloat16, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo &  input,
const TensorInfo &  outputStateIn,
const TensorInfo &  cellStateIn,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const UnidirectionalSequenceLstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
) const
override

Definition at line 2767 of file RefLayerSupport.cpp.

2777 {
2784  bool supported = true;
2785 
2786  std::array<DataType, 2> supportedTypes =
2787  {
2790  };
2791 
2792  std::array<DataType, 2> supportedWeightTypes =
2793  {
2796  };
2797 
2798  std::array<DataType, 3> supportedBiasTypes =
2799  {
2803  };
2804 
2805  // check inputs and outputs
2806  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2807  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2808  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2809  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2810 
2811  // check layer parameters
2812  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2814  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2815  "is not a supported type.");
2816  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2818  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2819  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2821  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2822  "is not a supported type.");
2823  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2825  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2826  "is not a supported type.");
2827  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2829  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2830  "is not a supported type.");
2831  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2833  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2834  "is not a supported type.");
2835 
2836  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2837  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2838  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2839  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2840  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2841  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2842  if (!descriptor.m_CifgEnabled)
2843  {
2844  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2846  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2847  "is not a supported type.");
2848  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2850  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2851  "is not a supported type.");
2852  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2853  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2854  if (descriptor.m_PeepholeEnabled)
2855  {
2856  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2858  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2859  "is not a supported type.");
2860  }
2861  }
2862  if (descriptor.m_PeepholeEnabled)
2863  {
2864  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2866  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2867  "is not a supported type.");
2868  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2870  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2871  "is not a supported type.");
2872  }
2873  if (descriptor.m_ProjectionEnabled)
2874  {
2875  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2877  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2878  "is not a supported type.");
2879  if (paramsInfo.m_ProjectionBias != nullptr)
2880  {
2881  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2882  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2883  "are mismatched");
2884  }
2885  }
2886  if (descriptor.m_LayerNormEnabled)
2887  {
2888  if (!descriptor.m_CifgEnabled)
2889  {
2890  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2892  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2893  "is not a supported type.");
2894  }
2895  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2897  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2898  "is not a supported type.");
2899  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2901  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2902  "is not a supported type.");
2903  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2905  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2906  "is not a supported type.");
2907  }
2908 
2909  return supported;
2910 }

References ILayerSupport::cellStateIn, ILayerSupport::cellStateOut, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputStateIn, ILayerSupport::outputStateOut, ILayerSupport::paramsInfo, armnn::QAsymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().


The documentation for this class was generated from the following files:
armnn::RefLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1744
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::ActivationFunction::Abs
@ Abs
armnn::ActivationFunction::Elu
@ Elu
armnn::LayerType::Floor
@ Floor
armnn::RefLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1558
armnn::RefLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:997
armnn::LayerType::MemCopy
@ MemCopy
armnn::RefLayerSupport::IsDebugSupported
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1134
armnn::RefLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2344
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::RefLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1622
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::RefLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:538
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::RefLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2241
armnn::LayerType::Transpose
@ Transpose
armnn::RefLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2668
armnn::IsQuantized8BitType
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:303
armnn::RefLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1659
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::RefLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2736
armnn::ActivationFunction::Linear
@ Linear
armnn::RefLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:814
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::ILayerSupport::detectionBoxes
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
Definition: ILayerSupport.hpp:174
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::DataType::Float16
@ Float16
armnn::LayerType::Input
@ Input
armnn::LayerType::Slice
@ Slice
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
armnn::LayerType::ElementwiseBinary
@ ElementwiseBinary
armnn::LayerType::Maximum
@ Maximum
armnn::RefLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2057
armnn::LayerType::Quantize
@ Quantize
armnn::RefLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2448
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::RefLayerSupport::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1954
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::RefLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1892
armnn::RefLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2569
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
armnn::RefLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:939
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::RefLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2089
armnn::DataType::Signed32
@ Signed32
armnn::RefLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2476
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::LayerType::Merge
@ Merge
armnn::RefLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2020
armnn::LayerType::Permute
@ Permute
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerSupportBase::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:509
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::RefLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:875
armnn::LayerType::Addition
@ Addition
armnn::RefLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1589
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::Reduce
@ Reduce
armnn::RefLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2536
armnn::RefLayerSupport::IsRankSupported
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2276
armnn::RefLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1066
armnn::LayerType::Division
@ Division
armnn::RefLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:674
armnn::LayerType::Debug
@ Debug
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::RefLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1693
armnn::RefLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2634
armnn::RefLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1339
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:38
armnn::LayerType::Activation
@ Activation
armnn::ILayerSupport::detectionClasses
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
Definition: ILayerSupport.hpp:175
armnn::LayerType::Normalization
@ Normalization
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::LayerType::Reshape
@ Reshape
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Fill
@ Fill
armnn::RefLayerSupport::IsFakeQuantizationSupported
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1427
armnn::ILayerSupport::numDetections
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
Definition: ILayerSupport.hpp:177
armnn::LayerType::Resize
@ Resize
armnn::RefLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:906
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
armnn::RefLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:765
armnn::RefLayerSupport::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1297
armnn::LayerType::Rank
@ Rank
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::RefLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:843
armnn::ActivationFunction::SoftReLu
@ SoftReLu
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::RefLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2156
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::RefLayerSupport::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2373
armnn::DataType::Float32
@ Float32
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::LayerType::GatherNd
@ GatherNd
armnn::RefLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1855
armnn::ILayerSupport::gamma
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::DataType::Signed64
@ Signed64
armnn::RefLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2095
armnn::LayerType::Lstm
@ Lstm
armnn::RefLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1376
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::RefLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2320
armnn::ILayerSupport::beta
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
Definition: ILayerSupport.hpp:65
armnn::ActivationFunction::Square
@ Square
armnn::ILayerSupport::weights
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::RefLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1164
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::ILayerSupport::scores
const TensorInfo & scores
Definition: ILayerSupport.hpp:172
armnn::LayerType::Unmap
@ Unmap
armnn::RefLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1193
armnn::RefLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2291
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::ILayerSupport::biases
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::LayerType::Mean
@ Mean
armnn::RefLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:642
armnn::RefLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1470
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::RefLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2391
armnn::DataType::BFloat16
@ BFloat16
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::RefLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2506
armnn::RefLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2217
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::ActivationFunction::TanH
@ TanH
armnn::RefLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1445
armnn::RefLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1628
armnn::RefLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2597
armnn::RefLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1983
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::ILayerSupport::detectionScores
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
Definition: ILayerSupport.hpp:176
armnn::ILayerSupport::anchors
const TensorInfo const TensorInfo & anchors
Definition: ILayerSupport.hpp:173
armnn::RefLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:957
armnn::DataType::QSymmS8
@ QSymmS8
armnn::RefLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:605
armnn::LayerType::Concat
@ Concat
armnn::RefLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1492
armnn::DataType::QSymmS16
@ QSymmS16
armnn::LayerType::Cast
@ Cast
armnn::ActivationFunction::ReLu
@ ReLu
armnn::RefLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2419
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::RefLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2125
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::RefLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2186
armnn::RefLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1262
armnn::LayerType::Output
@ Output
armnn::RefLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:719
armnn::DataType::Boolean
@ Boolean
armnn::RefLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2767
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::RefLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1718
armnn::LayerType::Prelu
@ Prelu
armnn::ILayerSupport::outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::ILayerSupport::cellStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::LayerType::Dequantize
@ Dequantize
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::RefLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:977
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu