ArmNN 23.02 — RefLayerSupport Class Reference

#include <RefLayerSupport.hpp>

Inheritance diagram for RefLayerSupport:
RefLayerSupport inherits from LayerSupportBase, which in turn inherits from ILayerSupport.

Public Member Functions

bool IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from LayerSupportBase
bool IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogicalUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreCompiledSupported (const TensorInfo &input, const PreCompiledDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported) const override
 
bool IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsSwitchSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output0, const TensorInfo &output1, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
bool IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
 
- Public Member Functions inherited from ILayerSupport
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsActivationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsAdditionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsArgMinMaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsBatchToSpaceNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsCastSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsChannelShuffleSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsComparisonSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp16ToFp32Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvertFp32ToFp16Supported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConvolution3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDebugSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthToSpaceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDequantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsDivisionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsElementwiseUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFakeQuantizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFillSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFloorSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsFullyConnectedSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsGatherSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInputSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsInstanceNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsL2NormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalBinarySupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogicalUnarySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLogSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMaximumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMeanSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemCopySupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMemImportSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMergeSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMinimumSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsMultiplicationSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsNormalizationSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsOutputSupported(const TensorInfo &output
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPadSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPermuteSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPooling3dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreCompiledSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsPreluSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsQuantizedLstmSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsRankSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReduceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsReshapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsResizeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsShapeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSliceSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSoftmaxSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToBatchNdSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSpaceToDepthSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSplitterSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSubtractionSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsSwitchSupported(const TensorInfo &input0
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeConvolution2dSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsTransposeSupported(const TensorInfo &input
 
 ARMNN_DEPRECATED_MSG_REMOVAL_DATE ("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input
 

Additional Inherited Members

- Public Attributes inherited from ILayerSupport
const TensorInfo & output
 
const TensorInfo const ActivationDescriptor & descriptor
 
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo & input1
 
const TensorInfo const TensorInfo & output
 
const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ArgMinMaxDescriptor & descriptor
 
const TensorInfo const ArgMinMaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfomean
 
const TensorInfo const TensorInfo const TensorInfovar
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfobeta
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfogamma
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const BatchNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const BatchToSpaceNdDescriptordescriptor
 
const TensorInfo const BatchToSpaceNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ChannelShuffleDescriptordescriptor
 
const TensorInfo const ChannelShuffleDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const ComparisonDescriptordescriptor
 
const TensorInfo const TensorInfo const ComparisonDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsConcatSupported(const std Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution2dDescriptordescriptor
 
const TensorInfo const Convolution2dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Convolution3dDescriptordescriptor
 
const TensorInfo const Convolution3dDescriptor const TensorInfoweights
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const Convolution3dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthToSpaceDescriptordescriptor
 
const TensorInfo const DepthToSpaceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const DepthwiseConvolution2dDescriptordescriptor
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const DepthwiseConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoscores
 
const TensorInfo const TensorInfoanchors
 
const TensorInfo const TensorInfo const TensorInfodetectionBoxes
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionClasses
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfodetectionScores
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfonumDetections
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const DetectionPostProcessDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ElementwiseUnaryDescriptordescriptor
 
const TensorInfo const ElementwiseUnaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const FakeQuantizationDescriptordescriptor
 
const FakeQuantizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const FillDescriptordescriptor
 
const TensorInfo const FillDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfoweights
 
const TensorInfo const TensorInfo const TensorInfobiases
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const FullyConnectedDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const GatherDescriptordescriptor
 
const TensorInfo const TensorInfo const GatherDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const InstanceNormalizationDescriptordescriptor
 
const TensorInfo const InstanceNormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const L2NormalizationDescriptordescriptor
 
const TensorInfo const L2NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptordescriptor
 
const TensorInfo const TensorInfo const LogicalBinaryDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const LogSoftmaxDescriptordescriptor
 
const TensorInfo const LogSoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfooutputStateIn
 
const TensorInfo const TensorInfocellStateIn
 
const TensorInfo const TensorInfo const TensorInfoscratchBuffer
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const MeanDescriptor & descriptor
 
const TensorInfo const MeanDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo & ouput
 
const TensorInfo const NormalizationDescriptor & descriptor
 
const TensorInfo const NormalizationDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PadDescriptordescriptor
 
const TensorInfo const PadDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const PermuteDescriptordescriptor
 
const TensorInfo const PermuteDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling2dDescriptordescriptor
 
const TensorInfo const Pooling2dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const Pooling3dDescriptordescriptor
 
const TensorInfo const Pooling3dDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const PreCompiledDescriptordescriptor
 
const PreCompiledDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfoalpha
 
const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfo const TensorInfooutputStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QLstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfopreviousCellStateIn
 
const TensorInfo const TensorInfopreviousOutputIn
 
const TensorInfo const TensorInfo const TensorInfocellStateOut
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfooutput
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const QuantizedLstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReduceDescriptordescriptor
 
const TensorInfo const ReduceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ReshapeDescriptordescriptor
 
const TensorInfo const ReshapeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const ResizeDescriptordescriptor
 
const TensorInfo const ResizeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SliceDescriptordescriptor
 
const TensorInfo const SliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SoftmaxDescriptordescriptor
 
const TensorInfo const SoftmaxDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToBatchNdDescriptordescriptor
 
const TensorInfo const SpaceToBatchNdDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const SpaceToDepthDescriptordescriptor
 
const TensorInfo const SpaceToDepthDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptordescriptor
 
const std::vector< std::reference_wrapper< TensorInfo > > const ViewsDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfooutput
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptordescriptor
 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStackSupported(const std ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. " "Use ABI Stable IsLayerSupported accepting LayerType argument instead.", "23.08") virtual bool IsStandInSupported(const std const TensorInfo const StridedSliceDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfooutput0
 
const TensorInfo const TensorInfo const TensorInfooutput1
 
const TensorInfo const TensorInfo const TensorInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeConvolution2dDescriptordescriptor
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfoweights
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
 
const TensorInfo const TransposeConvolution2dDescriptor const TensorInfo const Optional< TensorInfo > Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TransposeDescriptordescriptor
 
const TensorInfo const TransposeDescriptor Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptordescriptor
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfoparamsInfo
 
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo Optional< std::string & > reasonIfUnsupported = EmptyOptional()) const
 
- Protected Member Functions inherited from ILayerSupport
 ILayerSupport ()
 
virtual ~ILayerSupport ()
 

Detailed Description

Definition at line 12 of file RefLayerSupport.hpp.

Member Function Documentation

◆ IsActivationSupported()

bool IsActivationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ActivationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 508 of file RefLayerSupport.cpp.

512 {
513  bool supported = true;
514 
515  // Define supported types.
516  std::array<DataType,6> supportedTypes = {
522  };
523 
524  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
525  "Reference activation: input type not supported.");
526 
527  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
528  "Reference activation: output type not supported.");
529 
530  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
531  "Reference activation: input and output types mismatched.");
532 
533  supported &= CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
534  "Reference activation: input and output shapes are of different rank.");
535 
536 
537  struct ActivationFunctionSupported : public Rule
538  {
539  ActivationFunctionSupported(const ActivationDescriptor& desc)
540  {
541  switch(desc.m_Function)
542  {
555  {
556  m_Res = true;
557  break;
558  }
559  default:
560  {
561  m_Res = false;
562  break;
563  }
564  }
565  }
566  };
567 
568  // Function is supported
569  supported &= CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
570  "Reference activation: function not supported.");
571 
572  return supported;
573 }

References armnn::Abs, armnn::BoundedReLu, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsAdditionSupported()

bool IsAdditionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 575 of file RefLayerSupport.cpp.

579 {
580  bool supported = true;
581 
582  std::array<DataType,7> supportedTypes = {
589  };
590 
591  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
592  "Reference addition: input 0 is not a supported type.");
593 
594  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
595  "Reference addition: input 1 is not a supported type.");
596 
597  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
598  "Reference addition: output is not a supported type.");
599 
600  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
601  "Reference addition: input 0 and Input 1 types are mismatched");
602 
603  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
604  "Reference addition: input and output types are mismatched");
605 
606  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
607  "Reference addition: shapes are not suitable for implicit broadcast.");
608 
609  return supported;
610 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsArgMinMaxSupported()

bool IsArgMinMaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ArgMinMaxDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 612 of file RefLayerSupport.cpp.

615 {
617 
618  std::array<DataType, 8> supportedInputTypes =
619  {
627  };
628 
629  std::array<DataType,2> supportedOutputTypes = {
632  };
633 
634  bool supported = true;
635 
636  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
637  "Reference ArgMinMax: input is not a supported type.");
638  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
639  "Reference ArgMinMax: output type not supported");
640 
641  return supported;
642 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and armnn::Signed64.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchMatMulSupported()

bool IsBatchMatMulSupported ( const TensorInfo &  inputX,
const TensorInfo &  inputY,
const TensorInfo &  output,
const BatchMatMulDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 644 of file RefLayerSupport.cpp.

649 {
651 
652  std::array<DataType, 6> supportedTypes =
653  {
659  };
660 
661  bool supported = true;
662 
663  supported &= CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
664  "Reference batch matrix multiplication: input X is not a supported type");
665 
666  supported &= CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
667  "Reference batch matrix multiplication: input Y is not a supported type");
668 
669  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
670  "Reference batch matrix multiplication: output is not a supported type");
671 
672  supported &= CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
673  "Reference batch matrix multiplication: input X and input Y types are mismatched");
674 
675  supported &= CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
676  "Reference batch matrix multiplication: inputs and output types are mismatched");
677 
678  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputX, 2),
680  "Reference batch matrix multiplication: input X is not of rank 2 or greater");
681 
682  supported &= CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputY, 2),
684  "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
685 
686  return supported;
687 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchNormalizationSupported()

bool IsBatchNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  mean,
const TensorInfo &  var,
const TensorInfo &  beta,
const TensorInfo &  gamma,
const BatchNormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 689 of file RefLayerSupport.cpp.

697 {
699 
700  std::array<DataType, 6> supportedTypes =
701  {
707  };
708 
709  bool supported = true;
710 
711  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
712  "Reference batch normalization: input is not a supported type.");
713 
714  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
715  "Reference batch normalization: output is not a supported type.");
716 
717  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
718  "Reference batch normalization: input and output types are mismatched");
719 
720  supported &= CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
721  "Reference batch normalization: mean is not a supported type.");
722 
723  supported &= CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
724  "Reference batch normalization: variance is not a supported type.");
725 
726  supported &= CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
727  "Reference batch normalization: beta is not a supported type.");
728 
729  supported &= CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
730  "Reference batch normalization: gamma is not a supported type.");
731 
732  return supported;
733 }

References ILayerSupport::beta, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, ILayerSupport::gamma, armnn::IgnoreUnused(), ILayerSupport::mean, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsBatchToSpaceNdSupported()

bool IsBatchToSpaceNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const BatchToSpaceNdDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 735 of file RefLayerSupport.cpp.

739 {
741 
742  bool supported = true;
743 
744  std::string batchToSpaceNdLayerStr = "batchToSpaceNd";
745  std::string inputTensorStr = "input";
746  std::string outputTensorStr = "output";
747 
748  // Define supported types.
749  std::array<DataType,6> supportedTypes =
750  {
756  };
757 
758  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
759  "Reference BatchToSpaceNd: input type not supported.");
760 
761  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
762  "Reference BatchToSpaceNd: output type not supported.");
763 
764  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
765  "Reference BatchToSpaceNd: input and output types mismatched.");
766 
767  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 4),
769  CreateIncorrectDimensionsErrorMsg(4,
771  batchToSpaceNdLayerStr,
772  outputTensorStr).data());
773 
774  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(input, 4),
776  CreateIncorrectDimensionsErrorMsg(4,
777  input.GetNumDimensions(),
778  batchToSpaceNdLayerStr,
779  inputTensorStr).data());
780 
781  return supported;
782 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsCastSupported()

bool IsCastSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 784 of file RefLayerSupport.cpp.

787 {
788  std::array<DataType, 9> supportedInputTypes =
789  {
797  };
798 
799  bool supported = true;
800  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
801  "Reference cast: input is not a supported type");
802 
803 
804  supported &= CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
805  "Reference cast: output is not a supported type");
806 
807  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
808  "Reference cast: input and output shapes have different number of total elements");
809 
810  return supported;
811 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsChannelShuffleSupported()

bool IsChannelShuffleSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ChannelShuffleDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 813 of file RefLayerSupport.cpp.

817 {
819  bool supported = true;
820 
821  // Define supported output and inputs types.
822  std::array<DataType, 7> supportedTypes =
823  {
830  };
831 
832  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
833  "Reference ChannelShuffle: input is not a supported type.");
834 
835  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
836  "Reference ChannelShuffle: output is not a supported type.");
837 
838  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
839  "Reference ChannelShuffle: input and output types are mismatched.");
840 
841  return supported;
842 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsComparisonSupported()

bool IsComparisonSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
const ComparisonDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 845 of file RefLayerSupport.cpp.

850 {
852  std::array<DataType, 8> supportedInputTypes =
853  {
861  };
862 
863  bool supported = true;
864  supported &= CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
865  "Reference comparison: input 0 is not a supported type");
866 
867  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
868  "Reference comparison: input 0 and Input 1 types are mismatched");
869 
871  "Reference comparison: output is not of type Boolean");
872 
873  return supported;
874 }

References armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConcatSupported()

bool IsConcatSupported ( const std::vector< const TensorInfo * >  inputs,
const TensorInfo &  output,
const OriginsDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 876 of file RefLayerSupport.cpp.

880 {
882 
883  bool supported = true;
884  std::array<DataType,7> supportedTypes =
885  {
892  };
893 
894  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
895  "Reference concatenation: output type not supported");
896  for (const TensorInfo* input : inputs)
897  {
898  ARMNN_ASSERT(input != nullptr);
899  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
900  "Reference concatenation: input type not supported");
901 
902  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
903  "Reference concatenation: input and output types mismatched.");
904  }
905 
906  return supported;
907 }

References ARMNN_ASSERT, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConstantSupported()

bool IsConstantSupported ( const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 909 of file RefLayerSupport.cpp.

911 {
912  std::array<DataType,8> supportedTypes =
913  {
921  };
922 
923  return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
924  "Reference constant: output is not a supported type.");
925 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvertFp16ToFp32Supported()

bool IsConvertFp16ToFp32Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 927 of file RefLayerSupport.cpp.

930 {
932  input.GetDataType(),
933  &TrueFunc<>,
934  &FalseInputFuncF32<>,
935  &FalseFuncU8<>,
936  &FalseFuncI32<>,
937  &FalseFuncU8<>) &&
940  &FalseOutputFuncF16<>,
941  &TrueFunc<>,
942  &FalseFuncU8<>,
943  &FalseFuncI32<>,
944  &FalseFuncU8<>));
945 }

References TensorInfo::GetDataType(), armnn::IsSupportedForDataTypeGeneric(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvertFp32ToFp16Supported()

bool IsConvertFp32ToFp16Supported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 947 of file RefLayerSupport.cpp.

950 {
952  input.GetDataType(),
953  &FalseInputFuncF16<>,
954  &TrueFunc<>,
955  &FalseFuncU8<>,
956  &FalseFuncI32<>,
957  &FalseFuncU8<>) &&
960  &TrueFunc<>,
961  &FalseOutputFuncF32<>,
962  &FalseFuncU8<>,
963  &FalseFuncI32<>,
964  &FalseFuncU8<>));
965 }

References TensorInfo::GetDataType(), armnn::IsSupportedForDataTypeGeneric(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvolution2dSupported()

bool IsConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 967 of file RefLayerSupport.cpp.

973 {
974  bool supported = true;
975 
976  // Define supported types.
977  std::array<DataType,7> supportedTypes =
978  {
985  };
986 
987  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
988  "Reference Convolution2d: input is not a supported type.");
989 
990  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
991  "Reference Convolution2d: output is not a supported type.");
992 
993  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
994  "Reference Convolution2d: input and output types mismatched.");
995 
996 
997  const DataType inputType = input.GetDataType();
998  if (IsQuantized8BitType(inputType))
999  {
1000  std::array<DataType, 3> supportedWeightTypes =
1001  {
1005  };
1006 
1007  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1008  "Reference Convolution2d: weights type not supported for quantized input.");
1009  }
1010  else
1011  {
1012  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1013  "Reference Convolution2d: weights is not a supported type.");
1014 
1015  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1016  "Reference Convolution2d: input and weights types mismatched.");
1017  }
1018 
1019  if (biases.has_value())
1020  {
1021  std::array<DataType,4> biasesSupportedTypes =
1022  {
1026  };
1027 
1028  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1029  "Reference Convolution2d: biases is not a supported type.");
1030  }
1032 
1033  return supported;
1034 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsConvolution3dSupported()

bool IsConvolution3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Convolution3dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1036 of file RefLayerSupport.cpp.

1042 {
1043  bool supported = true;
1044 
1045  // Define supported types.
1046  std::array<DataType,7> supportedTypes =
1047  {
1054  };
1055 
1056  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1057  "Reference Convolution3d: input is not a supported type.");
1058 
1059  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1060  "Reference Convolution3d: output is not a supported type.");
1061 
1062  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1063  "Reference Convolution3d: input and output types mismatched.");
1064 
1065  const DataType inputType = input.GetDataType();
1066  if (IsQuantized8BitType(inputType))
1067  {
1068  std::array<DataType, 3> supportedWeightTypes =
1069  {
1073  };
1074 
1075  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1076  "Reference Convolution3d: weights type not supported for quantized input.");
1077  }
1078  else
1079  {
1080  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1081  "Reference Convolution3d: weights is not a supported type.");
1082 
1083  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1084  "Reference Convolution3d: input and weights types mismatched.");
1085  }
1086 
1087  if (biases.has_value())
1088  {
1089  std::array<DataType,4> biasesSupportedTypes =
1090  {
1094  };
1095 
1096  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1097  "Reference Convolution3d: biases is not a supported type.");
1098  }
1100 
1101  return supported;
1102 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDebugSupported()

bool IsDebugSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1104 of file RefLayerSupport.cpp.

1107 {
1108  bool supported = true;
1109 
1110  std::array<DataType, 8> supportedTypes =
1111  {
1120  };
1121 
1122  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1123  "Reference for Debug layer: input type not supported");
1124 
1125  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1126  "Reference for Debug layer: output type not supported");
1127 
1128  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1129  "Reference for Debug layer: input and output types are mismatched");
1130 
1131  return supported;
1132 }

References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDepthToSpaceSupported()

bool IsDepthToSpaceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthToSpaceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1134 of file RefLayerSupport.cpp.

1138 {
1140  bool supported = true;
1141 
1142  std::array<DataType,6> supportedTypes =
1143  {
1149  };
1150 
1151  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1152  "Reference DepthToSpace: input type not supported");
1153 
1154  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1155  "Reference DepthToSpace: output type not supported");
1156 
1157  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1158  "Reference DepthToSpace: input and output types are mismatched");
1159 
1160  return supported;
1161 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDepthwiseConvolutionSupported()

bool IsDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1163 of file RefLayerSupport.cpp.

1169 {
1171  bool supported = true;
1172 
1173  // Define supported types.
1174  std::array<DataType,7> supportedTypes =
1175  {
1182  };
1183 
1184  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1185  "Reference DepthwiseConvolution2d: input is not a supported type.");
1186 
1187  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1188  "Reference DepthwiseConvolution2d: output is not a supported type.");
1189 
1190  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1191  "Reference DepthwiseConvolution2d: input and output types mismatched.");
1192 
1193  const DataType inputType = input.GetDataType();
1194  if (IsQuantized8BitType(inputType))
1195  {
1196  std::array<DataType, 3> supportedWeightTypes =
1197  {
1201  };
1202 
1203  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1204  "Reference DepthwiseConvolution2d: weights type not supported for "
1205  "quantized input.");
1206  }
1207  else
1208  {
1209  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1210  "Reference DepthwiseConvolution2d: weights is not a supported type.");
1211 
1212  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1213  "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1214  }
1215 
1216  if (biases.has_value())
1217  {
1218  std::array<DataType,4> biasesSupportedTypes =
1219  {
1223  };
1224  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1225  "Reference DepthwiseConvolution2d: biases is not a supported type.");
1226  }
1227 
1228  return supported;
1229 
1230 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().

◆ IsDequantizeSupported()

bool IsDequantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1232 of file RefLayerSupport.cpp.

1235 {
1236  bool supported = true;
1237 
1238  std::array<DataType,5> supportedInputTypes = {
1244  };
1245 
1246  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1247  "Reference for Dequantize layer: input type not supported.");
1248 
1249  supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1250  "Reference for Dequantize layer: per-axis quantized input not supported.");
1251 
1252  std::array<DataType,3> supportedOutputTypes = {
1255  };
1256 
1257  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1258  "Reference for Dequantize layer: output type not supported.");
1259 
1260  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1261  "Reference for Dequantize layer: input/output shapes have different num total "
1262  "elements.");
1263 
1264  return supported;
1265 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDetectionPostProcessSupported()

bool IsDetectionPostProcessSupported ( const TensorInfo &  boxEncodings,
const TensorInfo &  scores,
const TensorInfo &  anchors,
const TensorInfo &  detectionBoxes,
const TensorInfo &  detectionClasses,
const TensorInfo &  detectionScores,
const TensorInfo &  numDetections,
const DetectionPostProcessDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1267 of file RefLayerSupport.cpp.

1276 {
1278 
1279  bool supported = true;
1280 
1281  std::array<DataType,6> supportedInputTypes =
1282  {
1288  };
1289 
1290  supported &= CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1291  "Reference DetectionPostProcess: input 0 is not a supported type.");
1292 
1293  supported &= CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1294  "Reference DetectionPostProcess: input 1 is not a supported type.");
1295 
1296  return supported;
1297 }

References ILayerSupport::anchors, armnn::CheckSupportRule(), ILayerSupport::descriptor, ILayerSupport::detectionBoxes, ILayerSupport::detectionClasses, ILayerSupport::detectionScores, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::numDetections, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and ILayerSupport::scores.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsDilatedDepthwiseConvolutionSupported()

bool IsDilatedDepthwiseConvolutionSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const DepthwiseConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

◆ IsDivisionSupported()

bool IsDivisionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1309 of file RefLayerSupport.cpp.

1313 {
1314  bool supported = true;
1315 
1316  std::array<DataType,7> supportedTypes = {
1323  };
1324 
1325  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1326  "Reference division: input 0 is not a supported type.");
1327 
1328  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1329  "Reference division: input 1 is not a supported type.");
1330 
1331  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1332  "Reference division: output is not a supported type.");
1333 
1334  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1335  "Reference division: input 0 and Input 1 types are mismatched");
1336 
1337  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1338  "Reference division: input and output types are mismatched");
1339 
1340  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1341  "Reference division: shapes are not suitable for implicit broadcast.");
1342 
1343  return supported;
1344 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsElementwiseUnarySupported()

bool IsElementwiseUnarySupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ElementwiseUnaryDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1346 of file RefLayerSupport.cpp.

1350 {
1352 
1353  std::array<DataType, 7> supportedTypes =
1354  {
1361  };
1362 
1363  std::array<DataType, 1> logicalSupportedTypes =
1364  {
1366  };
1367 
1368  bool supported = true;
1369 
1370  if (descriptor.m_Operation == UnaryOperation::LogicalNot)
1371  {
1372  supported &= CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1373  "Reference elementwise unary: input type not supported");
1374 
1375  supported &= CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1376  "Reference elementwise unary: output type not supported");
1377  }
1378  else
1379  {
1380  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1381  "Reference elementwise unary: input type not supported");
1382 
1383  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1384  "Reference elementwise unary: output type not supported");
1385  }
1386 
1387  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1388  "Reference elementwise unary: input and output types not matching");
1389 
1390  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1391  "Reference elementwise unary: input and output shapes"
1392  "have different number of total elements");
1393 
1394  return supported;
1395 }

References armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFakeQuantizationSupported()

bool IsFakeQuantizationSupported ( const TensorInfo &  input,
const FakeQuantizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1397 of file RefLayerSupport.cpp.

1400 {
1402  bool supported = true;
1403 
1404  std::array<DataType,1> supportedTypes =
1405  {
1407  };
1408 
1409  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1410  "Reference fake quantization: input type not supported.");
1411 
1412  return supported;
1413 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFillSupported()

bool IsFillSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const FillDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 1415 of file RefLayerSupport.cpp.

1419 {
1422 
1423  bool supported = true;
1424 
1425  std::array<DataType,3> supportedTypes =
1426  {
1430  };
1431 
1432  supported &= CheckSupportRule(TypeIs(input, DataType::Signed32), reasonIfUnsupported,
1433  "Reference Fill: input type not supported.");
1434 
1435  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1436  "Reference Fill: output type not supported.");
1437  return supported;
1438 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFloorSupported()

bool IsFloorSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1440 of file RefLayerSupport.cpp.

1443 {
1445  bool supported = true;
1446 
1447  std::array<DataType,3> supportedTypes =
1448  {
1451  };
1452 
1453  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1454  "Reference Floor: input type not supported.");
1455 
1456  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1457  "Reference Floor: output type not supported.");
1458 
1459  return supported;
1460 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsFullyConnectedSupported()

bool IsFullyConnectedSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TensorInfo &  weights,
const TensorInfo &  biases,
const FullyConnectedDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1462 of file RefLayerSupport.cpp.

1468 {
1469  bool supported = true;
1470 
1471  // Define supported types.
1472  std::array<DataType,6> supportedTypes =
1473  {
1479  };
1480 
1481  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1482  "Reference Fully Connected: input type not supported.");
1483 
1484  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1485  "Reference Fully Connected: output type not supported.");
1486 
1487  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1488  "Reference Fully Connected: weights type not supported.");
1489 
1490  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1491  "Reference Fully Connected: input and output types mismatched.");
1492 
1493  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1494  "Reference Fully Connected: weights is not a supported type.");
1495 
1496  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1497  "Reference Fully Connected: input and weights types mismatched.");
1498 
1499  if (descriptor.m_BiasEnabled)
1500  {
1501  // Defined supported types for bias
1502  std::array<DataType, 5>
1503  supportedBiasTypes =
1504  {
1509  };
1510 
1511  supported &= CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1512  "Reference Fully Connected: bias type not supported.");
1513 
1514  supported &= CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1515  "Reference Fully Connected: bias and weight types mismatch.");
1516 
1517  supported &= CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1518  "Reference Fully Connected: bias type inferred from weights is incompatible.");
1519 
1520  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1521  "Reference Fully Connected: bias must have 1 dimension.");
1522 
1523  }
1524 
1525  return supported;
1526 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsGatherNdSupported()

bool IsGatherNdSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const

Definition at line 1528 of file RefLayerSupport.cpp.

1532 {
1533  bool supported = true;
1534  std::array<DataType,7> supportedTypes =
1535  {
1542  };
1543 
1544  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1545  "Reference GatherNd: input type not supported");
1546 
1547  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1548  "Reference GatherNd: output type not supported");
1549 
1551  "Reference GatherNd: indices (input1) type not supported");
1552 
1553  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1554  "Reference GatherNd: input and output types not matching");
1555 
1556  return supported;
1557 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsGatherSupported()

bool IsGatherSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const GatherDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1559 of file RefLayerSupport.cpp.

1564 {
1565  bool supported = true;
1566  std::array<DataType,7> supportedTypes =
1567  {
1574  };
1575 
1577  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1578  "Reference Gather: input type not supported");
1579 
1580  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1581  "Reference Gather: output type not supported");
1582 
1584  "Reference Gather: indices (input1) type not supported");
1585 
1586  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1587  "Reference Gather: input and output types not matching");
1588 
1589  return supported;
1590 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsInputSupported()

bool IsInputSupported ( const TensorInfo input,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1592 of file RefLayerSupport.cpp.

1594 {
1595  return true;
1596 }

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsInstanceNormalizationSupported()

bool IsInstanceNormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const InstanceNormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1598 of file RefLayerSupport.cpp.

1602 {
1604  // Define supported types
1605  std::array<DataType, 3> supportedTypes =
1606  {
1609  };
1610 
1611  bool supported = true;
1612 
1613  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1614  "Reference Instance Normalization: input type not supported.");
1615 
1616  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1617  "Reference Instance Normalization: output type not supported.");
1618 
1619  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1620  "Reference Instance Normalization: input and output types mismatched.");
1621 
1622  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1623  "Reference Instance Normalization: input and output shapes have different "
1624  "num total elements.");
1625 
1626  return supported;
1627 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsL2NormalizationSupported()

bool IsL2NormalizationSupported ( const TensorInfo input,
const TensorInfo output,
const L2NormalizationDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1629 of file RefLayerSupport.cpp.

1633 {
1635  // Define supported types
1636  std::array<DataType, 6> supportedTypes =
1637  {
1643  };
1644 
1645  bool supported = true;
1646 
1647  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1648  "Reference L2normalization: input type not supported.");
1649 
1650  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1651  "Reference L2normalization: output type not supported.");
1652 
1653  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1654  "Reference L2normalization: input and output types mismatched.");
1655 
1656  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1657  "Reference L2normalization: input and output shapes have different "
1658  "num total elements.");
1659 
1660  return supported;
1661 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLayerSupported()

bool IsLayerSupported ( const LayerType type,
const std::vector< TensorInfo > &  infos,
const BaseDescriptor descriptor,
const Optional< LstmInputParamsInfo > &  lstmParamsInfo,
const Optional< QuantizedLstmInputParamsInfo > &  quantizedLstmInputParamsInfo,
Optional< std::string & >  reasonIfUnsupported 
) const
overridevirtual

Reimplemented from LayerSupportBase.

Definition at line 61 of file RefLayerSupport.cpp.

67 {
68  switch (type)
69  {
71  return IsActivationSupported(infos[0],
72  infos[1],
73  *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
76  return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
78  return IsArgMinMaxSupported(infos[0],
79  infos[1],
80  *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
83  return IsBatchMatMulSupported(infos[0],
84  infos[1],
85  infos[2],
86  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
89  return IsBatchNormalizationSupported(infos[0],
90  infos[1],
91  infos[2],
92  infos[3],
93  infos[4],
94  infos[5],
95  *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
96  (&descriptor)),
99  return IsBatchToSpaceNdSupported(infos[0],
100  infos[1],
101  *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
104  return IsComparisonSupported(infos[0],
105  infos[1],
106  infos[2],
107  *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
109  case LayerType::Concat:
110  {
111  std::vector<const TensorInfo*> inputInfos;
112  for (uint32_t i = 0; i < (infos.size() - 1); i++)
113  {
114  inputInfos.push_back(&infos[i]);
115  }
116  return IsConcatSupported(inputInfos,
117  infos[infos.size() - 1],
118  *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
120  }
121  case LayerType::Constant:
122  return IsConstantSupported(infos[0], reasonIfUnsupported);
124  return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
126  return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
128  {
129  if (infos.size() != 4)
130  {
131  throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
132  "TensorInfos should be of format: {input, output, weights, biases}.");
133  }
134 
135  auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
136  if (infos[3] == TensorInfo())
137  {
138  return IsConvolution2dSupported(infos[0],
139  infos[1],
140  desc,
141  infos[2],
142  EmptyOptional(),
144  }
145  else
146  {
147  return IsConvolution2dSupported(infos[0],
148  infos[1],
149  desc,
150  infos[2],
151  infos[3],
153  }
154  }
156  return IsDepthToSpaceSupported(infos[0],
157  infos[1],
158  *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
161  {
162  if (infos.size() != 4)
163  {
164  throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
165  "TensorInfos should be of format: {input, output, weights, biases}.");
166  }
167 
168  auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
169  if (infos[3] == TensorInfo())
170  {
171  return IsDepthwiseConvolutionSupported(infos[0],
172  infos[1],
173  desc,
174  infos[2],
175  EmptyOptional(),
177  }
178  else
179  {
180  return IsDepthwiseConvolutionSupported(infos[0],
181  infos[1],
182  desc,
183  infos[2],
184  infos[3],
186  }
187  }
189  return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
190  case LayerType::Division:
191  return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
193  return IsElementwiseUnarySupported(infos[0],
194  infos[1],
195  *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
197  case LayerType::Fill:
198  return IsFillSupported(infos[0],
199  infos[1],
200  *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
202  case LayerType::Floor:
203  return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
205  return IsFullyConnectedSupported(infos[0],
206  infos[1],
207  infos[2],
208  infos[3],
209  *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
211  case LayerType::Gather:
212  return IsGatherSupported(infos[0],
213  infos[1],
214  infos[2],
215  *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
217  case LayerType::GatherNd:
218  return IsGatherNdSupported(infos[0],
219  infos[1],
220  infos[2],
222  case LayerType::Input:
223  return IsInputSupported(infos[0], reasonIfUnsupported);
225  return IsInstanceNormalizationSupported(infos[0],
226  infos[1],
227  *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
228  (&descriptor)),
231  return IsL2NormalizationSupported(infos[0],
232  infos[1],
233  *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
236  return IsLogicalBinarySupported(infos[0],
237  infos[1],
238  infos[2],
239  *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
242  return IsLogSoftmaxSupported(infos[0],
243  infos[1],
244  *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
246  case LayerType::Lstm:
247  return IsLstmSupported(infos[0],
248  infos[1],
249  infos[2],
250  infos[3],
251  infos[4],
252  infos[5],
253  infos[6],
254  *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
255  lstmParamsInfo.value(),
257  case LayerType::QLstm:
258  return IsQLstmSupported(infos[0],
259  infos[1],
260  infos[2],
261  infos[3],
262  infos[4],
263  infos[5],
264  *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
265  lstmParamsInfo.value(),
267  case LayerType::Maximum:
268  return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
269  case LayerType::Mean:
270  return IsMeanSupported(infos[0],
271  infos[1],
272  *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
274  case LayerType::Minimum:
275  return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
277  return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
279  return IsNormalizationSupported(infos[0],
280  infos[1],
281  *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
283  case LayerType::Output:
284  return IsOutputSupported(infos[0], reasonIfUnsupported);
285  case LayerType::Pad:
286  return IsPadSupported(infos[0],
287  infos[1],
288  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
290  case LayerType::Permute:
291  return IsPermuteSupported(infos[0],
292  infos[1],
293  *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
296  return IsPooling2dSupported(infos[0],
297  infos[1],
298  *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
300  case LayerType::Prelu:
301  return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
302  case LayerType::Quantize:
303  return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
304  case LayerType::Reshape:
305  return IsReshapeSupported(infos[0],
306  infos[1],
307  *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
309  case LayerType::Resize:
310  return IsResizeSupported(infos[0],
311  infos[1],
312  *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
314  case LayerType::Reduce:
315  return IsReduceSupported(infos[0],
316  infos[1],
317  *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
319  case LayerType::Slice:
320  return IsSliceSupported(infos[0],
321  infos[1],
322  *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
324  case LayerType::Softmax:
325  return IsSoftmaxSupported(infos[0],
326  infos[1],
327  *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
330  return IsSpaceToBatchNdSupported(infos[0],
331  infos[1],
332  *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
335  return IsSpaceToDepthSupported(infos[0],
336  infos[1],
337  *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
339  case LayerType::Splitter:
340  {
341  std::vector<TensorInfo> outputInfos;
342  for (uint32_t i = 1; i < infos.size(); i++)
343  {
344  outputInfos.push_back(infos[i]);
345  }
346  return IsSplitterSupported(infos[0],
347  {outputInfos.begin(), outputInfos.end()},
348  *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
350  }
351  case LayerType::Stack:
352  {
353  std::vector<const TensorInfo*> inputInfos;
354  for (uint32_t i = 0; i < infos.size() - 1; i++)
355  {
356  inputInfos.push_back(&infos[i]);
357  }
358  return IsStackSupported(inputInfos,
359  infos[infos.size() - 1],
360  *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
362  }
364  return IsStridedSliceSupported(infos[0],
365  infos[1],
366  *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
369  return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
371  return IsTransposeSupported(infos[0],
372  infos[1],
373  *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
376  {
377  if (infos.size() != 4)
378  {
379  throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
380  "TensorInfos should be of format: {input, output, weights, biases}.");
381  }
382 
383  auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
384  if (infos[3] == TensorInfo())
385  {
386  return IsTransposeConvolution2dSupported(infos[0],
387  infos[1],
388  desc,
389  infos[2],
390  EmptyOptional(),
392  }
393  else
394  {
395  return IsTransposeConvolution2dSupported(infos[0],
396  infos[1],
397  desc,
398  infos[2],
399  infos[3],
401  }
402  }
403  case LayerType::Cast:
404  return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
406  return IsChannelShuffleSupported(infos[0],
407  infos[1],
408  *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
411  {
412  if (infos.size() != 4)
413  {
414  throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
415  "TensorInfos should be of format: {input, output, weights, biases}.");
416  }
417 
418  auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
419  if (infos[3] == TensorInfo())
420  {
421  return IsConvolution3dSupported(infos[0],
422  infos[1],
423  desc,
424  infos[2],
425  EmptyOptional(),
427  }
428  else
429  {
430  return IsConvolution3dSupported(infos[0],
431  infos[1],
432  desc,
433  infos[2],
434  infos[3],
436  }
437  }
438  case LayerType::Debug:
439  return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
441  return IsDetectionPostProcessSupported(infos[0],
442  infos[1],
443  infos[2],
444  infos[3],
445  infos[4],
446  infos[5],
447  infos[6],
448  *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
449  (&descriptor)),
452  return IsFakeQuantizationSupported(infos[0],
453  *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
455  case LayerType::MemCopy:
456  return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
457  case LayerType::Rank:
458  return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
459  case LayerType::Shape:
460  return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
462  {
463  if (infos.size() != 6)
464  {
465  throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
466  "should be of format: {input, outputStateIn, cellStateIn, "
467  "hiddenStateOutputVal, cellStateOutputVal, output}");
468  }
469  auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
471  infos[1],
472  infos[2],
473  infos[3],
474  infos[4],
475  infos[5],
476  desc,
477  lstmParamsInfo.value(),
479  }
481  return IsPooling3dSupported(infos[0],
482  infos[1],
483  *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
485  case LayerType::Map:
486  return true;
487  case LayerType::Unmap:
488  return true;
491  case LayerType::Merge:
492  return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
495  infos[1],
496  infos[2],
497  infos[3],
498  infos[4],
499  quantizedLstmInputParamsInfo.value(),
501  default:
502  // layers not supported in neon by default:
503  // precompiled, standin, switch
504  return false;
505  }
506 }

References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, ILayerSupport::descriptor, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchMatMulSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherNdSupported(), RefLayerSupport::IsGatherSupported(), RefLayerSupport::IsInputSupported(), RefLayerSupport::IsInstanceNormalizationSupported(), 
RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QLstm, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, ILayerSupport::reasonIfUnsupported, armnn::Reduce, armnn::Reshape, armnn::Resize, 
armnn::Shape, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().

◆ IsLogicalBinarySupported()

bool IsLogicalBinarySupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
const LogicalBinaryDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const
override

Definition at line 1663 of file RefLayerSupport.cpp.

1668 {
1670 
1671  std::array<DataType, 1> supportedTypes =
1672  {
1674  };
1675 
1676  bool supported = true;
1677  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1678  "Reference LogicalBinary: input 0 type not supported");
1679  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1680  "Reference LogicalBinary: input 1 type not supported");
1681 
1682  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1683  "Reference LogicalBinary: input and output types do not match");
1684 
1685  return supported;
1686 }

References armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::IgnoreUnused(), ILayerSupport::input1, ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLogSoftmaxSupported()

bool IsLogSoftmaxSupported ( const TensorInfo input,
const TensorInfo output,
const LogSoftmaxDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported 
) const
override

Definition at line 1688 of file RefLayerSupport.cpp.

1692 {
1694 
1695  std::array<DataType, 3> supportedTypes =
1696  {
1699  };
1700 
1701  bool supported = true;
1702  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1703  "Reference LogSoftmax: input type not supported");
1704 
1705  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1706  "Reference LogSoftmax: output type not supported");
1707 
1708  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1709  "Reference LogSoftmax: input and output types do not match");
1710 
1711  return supported;
1712 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsLstmSupported()

bool IsLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo scratchBuffer,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const LstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1714 of file RefLayerSupport.cpp.

1724 {
1727 
1728  bool supported = true;
1729 
1730  std::array<DataType,3> supportedTypes = {
1733  };
1734 
1735  // check inputs and outputs
1736  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1737  "Reference Lstm: input is not a supported type.");
1738  supported &= CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1739  "Reference Lstm: input and outputStateIn types are mismatched");
1740  supported &= CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1741  "Reference Lstm: input and cellStateIn types are mismatched");
1742  supported &= CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1743  "Reference Lstm: input and scratchBuffer types are mismatched");
1744  supported &= CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1745  "Reference Lstm: input and outputStateOut types are mismatched");
1746  supported &= CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1747  "Reference Lstm: input and cellStateOut types are mismatched");
1748 
1749  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1750  "Reference Lstm: input and output types are mismatched");
1751  // check layer parameters
1752  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1753  "Reference Lstm: input and InputToForgetWeights types are mismatched");
1754  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1755  "Reference Lstm: input and InputToCellWeights types are mismatched");
1756  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1757  "Reference Lstm: input and InputToOutputWeights types are mismatched");
1758  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1759  "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1760  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1761  "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1762  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1763  "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1764  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1765  "Reference Lstm: input and ForgetGateBias types are mismatched");
1766  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1767  "Reference Lstm: input and CellBias types are mismatched");
1768  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1769  "Reference Lstm: input and OutputGateBias types are mismatched");
1770  if (!descriptor.m_CifgEnabled)
1771  {
1772  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1773  "Reference Lstm: input and InputToInputWeights types are mismatched");
1774  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1776  "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1777  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1778  "Reference Lstm: input and InputGateBias types are mismatched");
1779  if (descriptor.m_PeepholeEnabled)
1780  {
1781  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1783  "Reference Lstm: input and CellToInputWeights types are mismatched");
1784  }
1785  }
1786  if (descriptor.m_PeepholeEnabled)
1787  {
1788  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1789  "Reference Lstm: input and CellToForgetWeights types are mismatched");
1790  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1791  "Reference Lstm: input and CellToOutputWeights types are mismatched");
1792  }
1793  if (descriptor.m_ProjectionEnabled)
1794  {
1795  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1796  "Reference Lstm: input and mProjectionWeights types are mismatched");
1797  if (paramsInfo.m_ProjectionBias != nullptr)
1798  {
1799  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1800  "Reference Lstm: input and ProjectionBias types are mismatched");
1801  }
1802  }
1803  if (descriptor.m_LayerNormEnabled)
1804  {
1805  if (!descriptor.m_CifgEnabled)
1806  {
1807  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1809  "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1810  }
1811  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1813  "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1814  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1816  "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1817  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1819  "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
1820  }
1821 
1822  return supported;
1823 }

References ILayerSupport::cellStateIn, ILayerSupport::cellStateOut, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputStateIn, ILayerSupport::outputStateOut, ILayerSupport::paramsInfo, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and ILayerSupport::scratchBuffer.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMaximumSupported()

bool IsMaximumSupported ( const TensorInfo input0,
const TensorInfo input1,
const TensorInfo output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1825 of file RefLayerSupport.cpp.

1829 {
1830  bool supported = true;
1831 
1832  std::array<DataType,7> supportedTypes = {
1839  };
1840 
1841  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1842  "Reference maximum: input 0 is not a supported type.");
1843 
1844  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1845  "Reference maximum: input 1 is not a supported type.");
1846 
1847  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1848  "Reference maximum: output is not a supported type.");
1849 
1850  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1851  "Reference maximum: input 0 and Input 1 types are mismatched");
1852 
1853  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1854  "Reference maximum: input and output types are mismatched");
1855 
1856  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1857  "Reference maximum: shapes are not suitable for implicit broadcast.");
1858 
1859  return supported;
1860 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMeanSupported()

bool IsMeanSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const MeanDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1862 of file RefLayerSupport.cpp.

1866 {
1867  bool supported = true;
1868  std::string meanLayerStr = "Mean";
1869  std::string outputTensorStr = "output";
1870 
1871  std::array<DataType,6> supportedTypes =
1872  {
1878  };
1879 
1880  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1881  "Reference Mean: input type not supported.");
1882 
1883  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1884  "Reference Mean: input and output types are mismatched");
1885 
1886  if (descriptor.m_KeepDims)
1887  {
1888  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1890  CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1892  meanLayerStr, outputTensorStr).data());
1893  }
1894  else if (descriptor.m_Axis.empty())
1895  {
1896  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1898  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1899  meanLayerStr, outputTensorStr).data());
1900  }
1901  else
1902  {
1903  auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1904 
1905  if (outputDim > 0)
1906  {
1907  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1909  CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1910  meanLayerStr, outputTensorStr).data());
1911  }
1912  else
1913  {
1914  supported &= CheckSupportRule(TensorNumDimensionsAreCorrect(output, 1),
1916  CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1917  meanLayerStr, outputTensorStr).data());
1918  }
1919  }
1920 
1921  return supported;
1922 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMemCopySupported()

bool IsMemCopySupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1924 of file RefLayerSupport.cpp.

1927 {
1928  bool supported = true;
1929 
1930  std::array<DataType,7> supportedTypes =
1931  {
1939  };
1940 
1941  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1942  "Reference MemCopy: input type not supported");
1943 
1944  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1945  "Reference MemCopy: output type not supported");
1946 
1947  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1948  "Reference MemCopy: input and output types are mismatched");
1949 
1950  return supported;
1951 }

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMinimumSupported()

bool IsMinimumSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1953 of file RefLayerSupport.cpp.

1957 {
1958  bool supported = true;
1959 
1960  std::array<DataType,7> supportedTypes = {
1967  };
1968 
1969  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1970  "Reference minimum: input 0 is not a supported type.");
1971 
1972  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1973  "Reference minimum: input 1 is not a supported type.");
1974 
1975  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1976  "Reference minimum: output is not a supported type.");
1977 
1978  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1979  "Reference minimum: input 0 and Input 1 types are mismatched");
1980 
1981  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1982  "Reference minimum: input and output types are mismatched");
1983 
1984  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1985  "Reference minimum: shapes are not suitable for implicit broadcast.");
1986 
1987  return supported;
1988 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsMultiplicationSupported()

bool IsMultiplicationSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 1990 of file RefLayerSupport.cpp.

1994 {
1995  bool supported = true;
1996 
1997  std::array<DataType,7> supportedTypes = {
2004  };
2005 
2006  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2007  "Reference multiplication: input 0 is not a supported type.");
2008 
2009  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2010  "Reference multiplication: input 1 is not a supported type.");
2011 
2012  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2013  "Reference multiplication: output is not a supported type.");
2014 
2015  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2016  "Reference multiplication: input 0 and Input 1 types are mismatched");
2017 
2018  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2019  "Reference multiplication: input and output types are mismatched");
2020 
2021  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2022  "Reference multiplication: shapes are not suitable for implicit broadcast.");
2023 
2024  return supported;
2025 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsNormalizationSupported()

bool IsNormalizationSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const NormalizationDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2027 of file RefLayerSupport.cpp.

2031 {
2033 
2034  // Define supported types
2035  std::array<DataType, 6> supportedTypes =
2036  {
2042  };
2043 
2044  bool supported = true;
2045 
2046  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2047  "Reference normalization: input type not supported.");
2048 
2049  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2050  "Reference normalization: output type not supported.");
2051 
2052  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2053  "Reference normalization: input and output shapes have different "
2054  "num total elements.");
2055 
2056  return supported;
2057 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsOutputSupported()

bool IsOutputSupported ( const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2059 of file RefLayerSupport.cpp.

2061 {
2062  return true;
2063 }

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPadSupported()

bool IsPadSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PadDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2065 of file RefLayerSupport.cpp.

2069 {
2071  bool supported = true;
2072 
2073  // Define supported output and inputs types.
2074  std::array<DataType,6> supportedTypes =
2075  {
2081  };
2082 
2083  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2084  "Reference pad: input is not a supported type.");
2085 
2086  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2087  "Reference pad: output is not a supported type.");
2088 
2089  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2090  "Reference pad: input and output types are mismatched.");
2091 
2092  return supported;
2093 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPermuteSupported()

bool IsPermuteSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const PermuteDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2095 of file RefLayerSupport.cpp.

2099 {
2101  bool supported = true;
2102 
2103  // Define supported output and inputs types.
2104  std::array<DataType, 6> supportedTypes =
2105  {
2112  };
2113 
2114  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2115  "Reference permute: input is not a supported type.");
2116 
2117  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2118  "Reference permute: output is not a supported type.");
2119 
2120  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2121  "Reference permute: input and output types are mismatched.");
2122 
2123  return supported;
2124 }

References armnn::BFloat16, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPooling2dSupported()

bool IsPooling2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling2dDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2126 of file RefLayerSupport.cpp.

2130 {
2132  bool supported = true;
2133 
2134  // Define supported output and inputs types.
2135  std::array<DataType,6> supportedTypes =
2136  {
2142  };
2143 
2144  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2145  "Reference poolind2d: input is not a supported type.");
2146 
2147  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2148  "Reference poolind2d: output is not a supported type.");
2149 
2150  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2151  "Reference poolind2d: input and output types are mismatched.");
2152 
2153  return supported;
2154 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPooling3dSupported()

bool IsPooling3dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const Pooling3dDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2156 of file RefLayerSupport.cpp.

2160 {
2162  bool supported = true;
2163 
2164  // Define supported output and inputs types.
2165  std::array<DataType,6> supportedTypes =
2166  {
2172  };
2173 
2174  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2175  "Reference poolind3d: input is not a supported type.");
2176 
2177  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2178  "Reference poolind3d: output is not a supported type.");
2179 
2180  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2181  "Reference poolind3d: input and output types are mismatched.");
2182 
2183  return supported;
2184 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsPreluSupported()

bool IsPreluSupported ( const TensorInfo &  input,
const TensorInfo &  alpha,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2604 of file RefLayerSupport.cpp.

2608 {
2609  bool supported = true;
2610 
2611  std::array<DataType, 6> supportedTypes
2612  {
2618  };
2619 
2620  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2621  "PReLU: input is not a supported type.");
2622 
2623  supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2624  "PReLU: alpha is not a supported type.");
2625 
2626  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2627  "PReLU: output is not a supported type.");
2628 
2629  supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2630  "PReLU: input, alpha and output types are mismatched");
2631 
2632  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2633  "PReLU: shapes are not suitable for implicit broadcast");
2634 
2635  return supported;
2636 }

References ILayerSupport::alpha, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsQLstmSupported()

bool IsQLstmSupported ( const TensorInfo &  input,
const TensorInfo &  previousOutputIn,
const TensorInfo &  previousCellStateIn,
const TensorInfo &  outputStateOut,
const TensorInfo &  cellStateOut,
const TensorInfo &  output,
const QLstmDescriptor &  descriptor,
const LstmInputParamsInfo &  paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

◆ IsQuantizeSupported()

bool IsQuantizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2211 of file RefLayerSupport.cpp.

2214 {
2215  bool supported = true;
2216 
2217  // Define supported input types.
2218  std::array<DataType,7> supportedInputTypes = {
2225  };
2226 
2227  supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2228  "Reference quantize: input type not supported.");
2229 
2230  // Define supported output types.
2231  std::array<DataType,4> supportedOutputTypes = {
2236  };
2237  supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2238  "Reference quantize: output type not supported.");
2239 
2240  supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2241  "Reference quantize: input and output shapes have different num total elements.");
2242 
2243  return supported;
2244 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsRankSupported()

bool IsRankSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2246 of file RefLayerSupport.cpp.

2249 {
2250  IgnoreUnused(input);
2251  // Define supported output types.
2252  std::array<DataType,1> supportedOutputTypes =
2253  {
2255  };
2256 
2257  return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2258  "Reference rank: input type not supported.");
2259 }

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsReduceSupported()

bool IsReduceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReduceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2261 of file RefLayerSupport.cpp.

2265 {
2267  bool supported = true;
2268  std::array<DataType,7> supportedTypes =
2269  {
2276  };
2277 
2278  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2279  "Reference Reduce: input type not supported");
2280 
2281  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2282  "Reference Reduce: output type not supported");
2283 
2284  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2285  "Reference Reduce: input and output types not matching");
2286 
2287  return supported;
2288 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsReshapeSupported()

bool IsReshapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ReshapeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2290 of file RefLayerSupport.cpp.

2294 {
2297  // Define supported output types.
2298  std::array<DataType,8> supportedOutputTypes =
2299  {
2308  };
2309 
2310  return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2311  "Reference reshape: input type not supported.");
2312 }

References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsResizeSupported()

bool IsResizeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const ResizeDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2314 of file RefLayerSupport.cpp.

2318 {
2320  bool supported = true;
2321  std::array<DataType,6> supportedTypes =
2322  {
2329  };
2330 
2331  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2332  "Reference Resize: input type not supported");
2333 
2334  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2335  "Reference Resize: output type not supported");
2336 
2337  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2338  "Reference Resize: input and output types not matching");
2339 
2340  return supported;
2341 }

References armnn::BFloat16, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsShapeSupported()

bool IsShapeSupported ( const TensorInfo &  input,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2343 of file RefLayerSupport.cpp.

2346 {
2347  IgnoreUnused(input);
2348  bool supported = true;
2349 
2350  std::array<DataType, 1> supportedTypes =
2351  {
2353  };
2354 
2355  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2356  "Reference Shape: output type not supported");
2357 
2358  return supported;
2359 }

References armnn::CheckSupportRule(), armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSliceSupported()

bool IsSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SliceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2361 of file RefLayerSupport.cpp.

2365 {
2367  bool supported = true;
2368 
2369  std::array<DataType, 5> supportedTypes =
2370  {
2375  };
2376 
2377  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2378  "Reference Slice: input type not supported");
2379 
2380  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2381  "Reference Slice: output type not supported");
2382 
2383  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2384  "Reference Slice: input and output types are mismatched");
2385 
2386  return supported;
2387 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSoftmaxSupported()

bool IsSoftmaxSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SoftmaxDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2389 of file RefLayerSupport.cpp.

2393 {
2395  bool supported = true;
2396  std::array<DataType,7> supportedTypes =
2397  {
2404  };
2405 
2406  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2407  "Reference Softmax: output type not supported");
2408 
2409  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2410  "Reference Softmax: input type not supported");
2411 
2412  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2413  "Reference Softmax: input type not supported");
2414 
2415  return supported;
2416 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSpaceToBatchNdSupported()

bool IsSpaceToBatchNdSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToBatchNdDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2418 of file RefLayerSupport.cpp.

2422 {
2424  bool supported = true;
2425  std::array<DataType,6> supportedTypes =
2426  {
2432  };
2433 
2434  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2435  "Reference SpaceToBatchNd: input type not supported");
2436 
2437  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2438  "Reference SpaceToBatchNd: output type not supported");
2439 
2440  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2441  "Reference SpaceToBatchNd: input and output types are mismatched");
2442 
2443  return supported;
2444 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSpaceToDepthSupported()

bool IsSpaceToDepthSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const SpaceToDepthDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2446 of file RefLayerSupport.cpp.

2450 {
2451 
2453  bool supported = true;
2454 
2455  std::array<DataType,6> supportedTypes =
2456  {
2462  };
2463 
2464  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2465  "Reference SpaceToDepth: input type not supported");
2466 
2467  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2468  "Reference SpaceToDepth: output type not supported");
2469 
2470  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2471  "Reference SpaceToDepth: input and output types are mismatched");
2472 
2473  return supported;
2474 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSplitterSupported()

bool IsSplitterSupported ( const TensorInfo &  input,
const std::vector< std::reference_wrapper< TensorInfo >> &  outputs,
const ViewsDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2476 of file RefLayerSupport.cpp.

2480 {
2482  bool supported = true;
2483  std::array<DataType,6> supportedTypes =
2484  {
2490  };
2491 
2492  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2493  "Reference splitter: output type not supported");
2494  for (const TensorInfo& output : outputs)
2495  {
2496  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2497  "Reference splitter: input type not supported");
2498 
2499  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2500  "Reference splitter: input and output types mismatched.");
2501  }
2502 
2503  return supported;
2504 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputs, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsStackSupported()

bool IsStackSupported ( const std::vector< const TensorInfo * > &  inputs,
const TensorInfo &  output,
const StackDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2506 of file RefLayerSupport.cpp.

2510 {
2512 
2513  bool supported = true;
2514  std::array<DataType,7> supportedTypes =
2515  {
2522  };
2523 
2524  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2525  "Reference stack: output type not supported");
2526  for (const TensorInfo* input : inputs)
2527  {
2528  ARMNN_ASSERT(input != nullptr);
2529  supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2530  "Reference stack: input type not supported");
2531 
2532  supported &= CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2533  "Reference stack: input and output types mismatched.");
2534  }
2535 
2536  return supported;
2537 }

References ARMNN_ASSERT, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsStridedSliceSupported()

bool IsStridedSliceSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const StridedSliceDescriptor &  descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2539 of file RefLayerSupport.cpp.

2543 {
2545  bool supported = true;
2546 
2547  std::array<DataType,5> supportedTypes =
2548  {
2553  };
2554 
2555  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2556  "Reference StridedSlice: input type not supported");
2557 
2558  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2559  "Reference StridedSlice: output type not supported");
2560 
2561  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2562  "Reference StridedSlice: input and output types are mismatched");
2563 
2564  return supported;
2565 }

References armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsSubtractionSupported()

bool IsSubtractionSupported ( const TensorInfo &  input0,
const TensorInfo &  input1,
const TensorInfo &  output,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2567 of file RefLayerSupport.cpp.

2571 {
2572  bool supported = true;
2573 
2574  std::array<DataType,7> supportedTypes = {
2581  };
2582 
2583  supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2584  "Reference subtraction: input 0 is not a supported type.");
2585 
2586  supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2587  "Reference subtraction: input 1 is not a supported type.");
2588 
2589  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2590  "Reference subtraction: output is not a supported type.");
2591 
2592  supported &= CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2593  "Reference subtraction: input 0 and Input 1 types are mismatched");
2594 
2595  supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2596  "Reference subtraction: input and output types are mismatched");
2597 
2598  supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2599  "Reference subtraction: shapes are not suitable for implicit broadcast.");
2600 
2601  return supported;
2602 }

References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, ILayerSupport::input1, ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsTransposeConvolution2dSupported()

bool IsTransposeConvolution2dSupported ( const TensorInfo &  input,
const TensorInfo &  output,
const TransposeConvolution2dDescriptor &  descriptor,
const TensorInfo &  weights,
const Optional< TensorInfo > &  biases,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2638 of file RefLayerSupport.cpp.

2644 {
2646  bool supported = true;
2647 
2648  std::array<DataType,7> supportedTypes =
2649  {
2656  };
2657 
2658  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2659  "Reference TransposeConvolution2d: input is not a supported type.");
2660 
2661  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2662  "Reference TransposeConvolution2d: output is not a supported type.");
2663 
2664  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2665  "Reference TransposeConvolution2d: input and output types mismatched.");
2666 
2667 
2668  const DataType inputType = input.GetDataType();
2669  if (IsQuantized8BitType(inputType))
2670  {
2671  std::array<DataType, 3> supportedWeightTypes =
2672  {
2676  };
2677 
2678  supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2679  "Reference TransposeConvolution2d: weights type not supported for "
2680  "quantized input.");
2681  }
2682  else
2683  {
2684  supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2685  "Reference TransposeConvolution2d: weights is not a supported type.");
2686 
2687  supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2688  "Reference TransposeConvolution2d: input and weights types mismatched.");
2689  }
2690 
2691  if (biases.has_value())
2692  {
2693  std::array<DataType,4> biasesSupportedTypes =
2694  {
2698  };
2699  supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2700  "Reference TransposeConvolution2d: biases is not a supported type.");
2701  }
2702 
2703  return supported;
2704 }

References ILayerSupport::biases, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, ILayerSupport::reasonIfUnsupported, armnn::Signed32, and ILayerSupport::weights.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsTransposeSupported()

bool IsTransposeSupported ( const TensorInfo input,
const TensorInfo output,
const TransposeDescriptor descriptor,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2706 of file RefLayerSupport.cpp.

2710 {
2712  bool supported = true;
2713 
2714  // Define supported output and inputs types.
2715  std::array<DataType, 6> supportedTypes =
2716  {
2723  };
2724 
2725  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2726  "Reference transpose: input is not a supported type.");
2727 
2728  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2729  "Reference transpose: output is not a supported type.");
2730 
2731  supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2732  "Reference transpose: input and output types are mismatched.");
2733 
2734  return supported;
2735 }

References armnn::BFloat16, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and ILayerSupport::reasonIfUnsupported.

Referenced by RefLayerSupport::IsLayerSupported().

◆ IsUnidirectionalSequenceLstmSupported()

bool IsUnidirectionalSequenceLstmSupported ( const TensorInfo input,
const TensorInfo outputStateIn,
const TensorInfo cellStateIn,
const TensorInfo outputStateOut,
const TensorInfo cellStateOut,
const TensorInfo output,
const UnidirectionalSequenceLstmDescriptor descriptor,
const LstmInputParamsInfo paramsInfo,
Optional< std::string & >  reasonIfUnsupported = EmptyOptional() 
) const
override

Definition at line 2737 of file RefLayerSupport.cpp.

2747 {
2754  bool supported = true;
2755 
2756  std::array<DataType, 2> supportedTypes =
2757  {
2760  };
2761 
2762  std::array<DataType, 2> supportedWeightTypes =
2763  {
2766  };
2767 
2768  std::array<DataType, 3> supportedBiasTypes =
2769  {
2773  };
2774 
2775  // check inputs and outputs
2776  supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2777  "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2778  supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2779  "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2780 
2781  // check layer parameters
2782  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2784  "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2785  "is not a supported type.");
2786  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2788  "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2789  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2791  "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2792  "is not a supported type.");
2793  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2795  "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2796  "is not a supported type.");
2797  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2799  "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2800  "is not a supported type.");
2801  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2803  "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2804  "is not a supported type.");
2805 
2806  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2807  "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2808  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2809  "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2810  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2811  "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2812  if (!descriptor.m_CifgEnabled)
2813  {
2814  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2816  "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2817  "is not a supported type.");
2818  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2820  "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2821  "is not a supported type.");
2822  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2823  "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2824  if (descriptor.m_PeepholeEnabled)
2825  {
2826  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2828  "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2829  "is not a supported type.");
2830  }
2831  }
2832  if (descriptor.m_PeepholeEnabled)
2833  {
2834  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2836  "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2837  "is not a supported type.");
2838  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2840  "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2841  "is not a supported type.");
2842  }
2843  if (descriptor.m_ProjectionEnabled)
2844  {
2845  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2847  "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2848  "is not a supported type.");
2849  if (paramsInfo.m_ProjectionBias != nullptr)
2850  {
2851  supported &= CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2852  "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2853  "are mismatched");
2854  }
2855  }
2856  if (descriptor.m_LayerNormEnabled)
2857  {
2858  if (!descriptor.m_CifgEnabled)
2859  {
2860  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2862  "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2863  "is not a supported type.");
2864  }
2865  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2867  "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2868  "is not a supported type.");
2869  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2871  "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2872  "is not a supported type.");
2873  supported &= CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2875  "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2876  "is not a supported type.");
2877  }
2878 
2879  return supported;
2880 }

References ILayerSupport::cellStateIn, ILayerSupport::cellStateOut, armnn::CheckSupportRule(), ILayerSupport::descriptor, armnn::Float32, armnn::IgnoreUnused(), ILayerSupport::output, ILayerSupport::outputStateIn, ILayerSupport::outputStateOut, ILayerSupport::paramsInfo, armnn::QAsymmS8, ILayerSupport::reasonIfUnsupported, and armnn::Signed32.

Referenced by RefLayerSupport::IsLayerSupported().


The documentation for this class was generated from the following files:
armnn::RefLayerSupport::IsLstmSupported
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1714
armnn::LayerSupportBase::IsMemImportSupported
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:397
armnn::ActivationFunction::Abs
@ Abs
armnn::ActivationFunction::Elu
@ Elu
armnn::LayerType::Floor
@ Floor
armnn::RefLayerSupport::IsGatherNdSupported
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:1528
armnn::RefLayerSupport::IsConvolution2dSupported
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:967
armnn::LayerType::MemCopy
@ MemCopy
armnn::RefLayerSupport::IsDebugSupported
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1104
armnn::RefLayerSupport::IsResizeSupported
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2314
armnn::LayerType::Softmax
@ Softmax
armnn::LayerType::Pooling3d
@ Pooling3d
armnn::RefLayerSupport::IsInputSupported
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1592
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::RefLayerSupport::IsActivationSupported
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:508
armnn::ILayerSupport::outputStateIn
const TensorInfo & outputStateIn
Definition: ILayerSupport.hpp:286
armnn::RefLayerSupport::IsQuantizeSupported
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2211
armnn::LayerType::Transpose
@ Transpose
armnn::RefLayerSupport::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2638
armnn::IsQuantized8BitType
constexpr bool IsQuantized8BitType(DataType dataType)
Definition: TypesUtils.hpp:289
armnn::RefLayerSupport::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1629
armnn::LayerType::ChannelShuffle
@ ChannelShuffle
armnn::ILayerSupport::paramsInfo
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const LstmDescriptor const LstmInputParamsInfo & paramsInfo
Definition: ILayerSupport.hpp:293
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::RefLayerSupport::IsTransposeSupported
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2706
armnn::ActivationFunction::Linear
@ Linear
armnn::RefLayerSupport::IsCastSupported
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:784
armnn::LayerType::ConvertFp32ToFp16
@ ConvertFp32ToFp16
armnn::LayerType::L2Normalization
@ L2Normalization
armnn::ILayerSupport::detectionBoxes
const TensorInfo const TensorInfo const TensorInfo & detectionBoxes
Definition: ILayerSupport.hpp:174
armnn::LayerType::TransposeConvolution2d
@ TransposeConvolution2d
armnn::ILayerSupport::scratchBuffer
const TensorInfo const TensorInfo const TensorInfo & scratchBuffer
Definition: ILayerSupport.hpp:288
armnn::LayerType::Map
@ Map
armnn::DataType::Float16
@ Float16
armnn::LayerType::Input
@ Input
armnn::LayerType::Slice
@ Slice
armnn::ILayerSupport::reasonIfUnsupported
const TensorInfo const ActivationDescriptor Optional< std::string & > reasonIfUnsupported
Definition: ILayerSupport.hpp:43
armnn::LayerType::Maximum
@ Maximum
armnn::RefLayerSupport::IsNormalizationSupported
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2027
armnn::LayerType::Quantize
@ Quantize
armnn::RefLayerSupport::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2418
armnn::LayerType::ArgMinMax
@ ArgMinMax
armnn::RefLayerSupport::IsMemCopySupported
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1924
armnn::LayerType::Subtraction
@ Subtraction
armnn::LayerType::SpaceToBatchNd
@ SpaceToBatchNd
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::RefLayerSupport::IsMeanSupported
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1862
armnn::RefLayerSupport::IsStridedSliceSupported
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2539
armnn::LayerType::Shape
@ Shape
armnn::ILayerSupport::previousOutputIn
const TensorInfo & previousOutputIn
Definition: ILayerSupport.hpp:405
armnn::RefLayerSupport::IsConstantSupported
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:909
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::RefLayerSupport::IsOutputSupported
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2059
armnn::DataType::Signed32
@ Signed32
armnn::RefLayerSupport::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2446
armnn::ILayerSupport::mean
const TensorInfo const TensorInfo & mean
Definition: ILayerSupport.hpp:63
armnn::LayerSupportBase::IsMergeSupported
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:404
armnn::ActivationFunction::HardSwish
@ HardSwish
armnn::LayerType::Merge
@ Merge
armnn::RefLayerSupport::IsMultiplicationSupported
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1990
armnn::LayerType::Permute
@ Permute
armnn::LayerType::ConvertFp16ToFp32
@ ConvertFp16ToFp32
armnn::LayerSupportBase::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: LayerSupportBase.cpp:509
armnn::LayerType::QLstm
@ QLstm
armnn::LayerType::Pad
@ Pad
armnn::RefLayerSupport::IsComparisonSupported
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:845
armnn::LayerType::Addition
@ Addition
armnn::RefLayerSupport::IsGatherSupported
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1559
armnn::LayerType::QuantizedLstm
@ QuantizedLstm
armnn::LayerType::BatchNormalization
@ BatchNormalization
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::LayerType::Reduce
@ Reduce
armnn::RefLayerSupport::IsStackSupported
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2506
armnn::RefLayerSupport::IsRankSupported
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2246
armnn::RefLayerSupport::IsConvolution3dSupported
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1036
armnn::LayerType::Division
@ Division
armnn::RefLayerSupport::IsBatchMatMulSupported
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
Definition: RefLayerSupport.cpp:644
armnn::LayerType::Debug
@ Debug
armnn::LayerType::InstanceNormalization
@ InstanceNormalization
armnn::RefLayerSupport::IsLogicalBinarySupported
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1663
armnn::RefLayerSupport::IsPreluSupported
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2604
armnn::RefLayerSupport::IsDivisionSupported
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1309
armnn::CheckSupportRule
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
Definition: LayerSupportRules.hpp:38
armnn::LayerType::Activation
@ Activation
armnn::ILayerSupport::detectionClasses
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionClasses
Definition: ILayerSupport.hpp:175
armnn::LayerType::Normalization
@ Normalization
armnn::LayerType::Comparison
@ Comparison
armnn::LayerType::Stack
@ Stack
armnn::ILayerSupport::descriptor
const TensorInfo const ActivationDescriptor & descriptor
Definition: ILayerSupport.hpp:42
armnn::LayerType::Reshape
@ Reshape
armnn::ILayerSupport::previousCellStateIn
const TensorInfo const TensorInfo & previousCellStateIn
Definition: ILayerSupport.hpp:406
armnn::LayerType::Gather
@ Gather
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::LayerType::Fill
@ Fill
armnn::RefLayerSupport::IsFakeQuantizationSupported
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1397
armnn::ILayerSupport::numDetections
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & numDetections
Definition: ILayerSupport.hpp:177
armnn::LayerType::Resize
@ Resize
armnn::RefLayerSupport::IsConcatSupported
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:876
armnn::ILayerSupport::alpha
const TensorInfo & alpha
Definition: ILayerSupport.hpp:392
armnn::RefLayerSupport::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:735
armnn::RefLayerSupport::IsDetectionPostProcessSupported
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1267
armnn::LayerType::Rank
@ Rank
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::RefLayerSupport::IsChannelShuffleSupported
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:813
armnn::ActivationFunction::SoftReLu
@ SoftReLu
armnn::LayerType::LogicalBinary
@ LogicalBinary
armnn::LayerType::UnidirectionalSequenceLstm
@ UnidirectionalSequenceLstm
armnn::RefLayerSupport::IsPooling2dSupported
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2126
armnn::LayerType::Pooling2d
@ Pooling2d
armnn::RefLayerSupport::IsShapeSupported
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2343
armnn::DataType::Float32
@ Float32
armnn::ILayerSupport::input1
const TensorInfo & input1
Definition: ILayerSupport.hpp:48
armnn::LayerType::GatherNd
@ GatherNd
armnn::RefLayerSupport::IsMaximumSupported
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1825
armnn::ILayerSupport::gamma
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & gamma
Definition: ILayerSupport.hpp:66
armnn::LayerType::Minimum
@ Minimum
armnn::LayerType::Constant
@ Constant
armnn::DataType::Signed64
@ Signed64
armnn::RefLayerSupport::IsPadSupported
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2065
armnn::LayerType::Lstm
@ Lstm
armnn::RefLayerSupport::IsElementwiseUnarySupported
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1346
armnn::LayerType::ElementwiseUnary
@ ElementwiseUnary
armnn::LayerType::SpaceToDepth
@ SpaceToDepth
armnn::LayerType::FakeQuantization
@ FakeQuantization
armnn::RefLayerSupport::IsReshapeSupported
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2290
armnn::ILayerSupport::beta
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & beta
Definition: ILayerSupport.hpp:65
armnn::ActivationFunction::Square
@ Square
armnn::ILayerSupport::weights
const TensorInfo const Convolution2dDescriptor const TensorInfo & weights
Definition: ILayerSupport.hpp:127
armnn::RefLayerSupport::IsDepthToSpaceSupported
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1134
armnn::LayerType::StridedSlice
@ StridedSlice
armnn::ILayerSupport::cellStateIn
const TensorInfo const TensorInfo & cellStateIn
Definition: ILayerSupport.hpp:287
armnn::ILayerSupport::scores
const TensorInfo & scores
Definition: ILayerSupport.hpp:172
armnn::LayerType::Unmap
@ Unmap
armnn::RefLayerSupport::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1163
armnn::RefLayerSupport::IsReduceSupported
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2261
armnn::LayerType::DetectionPostProcess
@ DetectionPostProcess
armnn::ILayerSupport::biases
const TensorInfo const Convolution2dDescriptor const TensorInfo const Optional< TensorInfo > & biases
Definition: ILayerSupport.hpp:128
armnn::LayerType::Mean
@ Mean
armnn::RefLayerSupport::IsArgMinMaxSupported
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:612
armnn::RefLayerSupport::IsFloorSupported
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1440
armnn::LayerType::BatchToSpaceNd
@ BatchToSpaceNd
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::IsSupportedForDataTypeGeneric
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
Definition: LayerSupportCommon.hpp:27
armnn::LayerType::DepthToSpace
@ DepthToSpace
armnn::RefLayerSupport::IsSliceSupported
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2361
armnn::DataType::BFloat16
@ BFloat16
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::RefLayerSupport::IsSplitterSupported
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2476
armnn::RefLayerSupport::IsQLstmSupported
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2187
armnn::ILayerSupport::outputs
const std::vector< std::reference_wrapper< TensorInfo > > & outputs
Definition: ILayerSupport.hpp:488
armnn::ActivationFunction::TanH
@ TanH
armnn::RefLayerSupport::IsFillSupported
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1415
armnn::RefLayerSupport::IsInstanceNormalizationSupported
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1598
armnn::RefLayerSupport::IsSubtractionSupported
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2567
armnn::RefLayerSupport::IsMinimumSupported
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1953
armnn::UnaryOperation::LogicalNot
@ LogicalNot
armnn::ILayerSupport::detectionScores
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & detectionScores
Definition: ILayerSupport.hpp:176
armnn::ILayerSupport::anchors
const TensorInfo const TensorInfo & anchors
Definition: ILayerSupport.hpp:173
armnn::RefLayerSupport::IsConvertFp16ToFp32Supported
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:927
armnn::DataType::QSymmS8
@ QSymmS8
armnn::RefLayerSupport::IsAdditionSupported
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:575
armnn::LayerType::Concat
@ Concat
armnn::RefLayerSupport::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1462
armnn::DataType::QSymmS16
@ QSymmS16
armnn::LayerType::Cast
@ Cast
armnn::ActivationFunction::ReLu
@ ReLu
armnn::RefLayerSupport::IsSoftmaxSupported
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2389
armnn::LayerType::BatchMatMul
@ BatchMatMul
armnn::ActivationFunction::Sqrt
@ Sqrt
armnn::RefLayerSupport::IsPermuteSupported
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2095
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::LayerType::Splitter
@ Splitter
armnn::ILayerSupport::output
const TensorInfo & output
Definition: ILayerSupport.hpp:41
armnn::LayerType::LogSoftmax
@ LogSoftmax
armnn::RefLayerSupport::IsPooling3dSupported
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2156
armnn::RefLayerSupport::IsDequantizeSupported
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:1232
armnn::LayerType::Output
@ Output
armnn::RefLayerSupport::IsBatchNormalizationSupported
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:689
armnn::DataType::Boolean
@ Boolean
armnn::RefLayerSupport::IsUnidirectionalSequenceLstmSupported
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:2737
armnn::LayerType::Multiplication
@ Multiplication
armnn::LayerType::MemImport
@ MemImport
armnn::RefLayerSupport::IsLogSoftmaxSupported
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const override
Definition: RefLayerSupport.cpp:1688
armnn::LayerType::Prelu
@ Prelu
armnn::ILayerSupport::outputStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo & outputStateOut
Definition: ILayerSupport.hpp:289
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::ILayerSupport::cellStateOut
const TensorInfo const TensorInfo const TensorInfo const TensorInfo const TensorInfo & cellStateOut
Definition: ILayerSupport.hpp:290
armnn::LayerType::Dequantize
@ Dequantize
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::RefLayerSupport::IsConvertFp32ToFp16Supported
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const override
Definition: RefLayerSupport.cpp:947
armnn::ActivationFunction::LeakyReLu
@ LeakyReLu