23.08
|
#include <RefLayerSupport.hpp>
|
bool | IsLayerSupported (const LayerType &type, const std::vector< TensorInfo > &infos, const BaseDescriptor &descriptor, const Optional< LstmInputParamsInfo > &lstmParamsInfo, const Optional< QuantizedLstmInputParamsInfo > &, Optional< std::string & > reasonIfUnsupported) const override |
| Default implementation of the ILayerSupport interface, Backends should implement this as a switch statement for each of their LayerTypes calling their specific backend implementation of IsXXXLayerSupported. More...
|
|
bool | IsActivationSupported (const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsAdditionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsArgMinMaxSupported (const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsBatchMatMulSupported (const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsBatchNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsBatchToSpaceNdSupported (const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsCastSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsChannelShuffleSupported (const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsComparisonSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConcatSupported (const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConstantSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvertFp16ToFp32Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvertFp32ToFp16Supported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsConvolution3dSupported (const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDebugSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDepthToSpaceSupported (const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDequantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDilatedDepthwiseConvolutionSupported (const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDivisionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsElementwiseUnarySupported (const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFakeQuantizationSupported (const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFillSupported (const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFloorSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsFullyConnectedSupported (const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsGatherNdSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsGatherSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsInputSupported (const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsInstanceNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsL2NormalizationSupported (const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsLogicalBinarySupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const |
|
bool | IsLogSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const |
|
bool | IsLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMaximumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMeanSupported (const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMinimumSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMultiplicationSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsNormalizationSupported (const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsOutputSupported (const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPadSupported (const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPermuteSupported (const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPooling2dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPooling3dSupported (const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsQuantizeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsQLstmSupported (const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsRankSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsReduceSupported (const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsReshapeSupported (const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsResizeSupported (const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsReverseV2Supported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSliceSupported (const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSoftmaxSupported (const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSpaceToBatchNdSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSpaceToDepthSupported (const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSplitterSupported (const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsStackSupported (const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsStridedSliceSupported (const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsSubtractionSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsPreluSupported (const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsTileSupported (const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsTransposeConvolution2dSupported (const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsTransposeSupported (const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsUnidirectionalSequenceLstmSupported (const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsDetectionPostProcessSupported (const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMemCopySupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMemImportSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsMergeSupported (const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsQuantizedLstmSupported (const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsShapeSupported (const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
bool | IsStandInSupported (const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const StandInDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const |
|
Definition at line 12 of file RefLayerSupport.hpp.
◆ IsActivationSupported()
Definition at line 548 of file RefLayerSupport.cpp.
553 bool supported =
true;
556 std::array<DataType,6> supportedTypes = {
564 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
565 "Reference activation: input type not supported.");
567 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
568 "Reference activation: output type not supported.");
570 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
571 "Reference activation: input and output types mismatched.");
573 supported &=
CheckSupportRule(ShapesAreSameRank(input, output), reasonIfUnsupported,
574 "Reference activation: input and output shapes are of different rank.");
577 struct ActivationFunctionSupported :
public Rule
579 ActivationFunctionSupported(
const ActivationDescriptor& desc)
581 switch(desc.m_Function)
609 supported &=
CheckSupportRule(ActivationFunctionSupported(descriptor), reasonIfUnsupported,
610 "Reference activation: function not supported.");
References armnn::Abs, armnn::BoundedReLu, armnn::CheckSupportRule(), armnn::Elu, armnn::Float16, armnn::Float32, armnn::HardSwish, armnn::LeakyReLu, armnn::Linear, ActivationDescriptor::m_Function, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::ReLu, armnn::Sigmoid, armnn::SoftReLu, armnn::Sqrt, armnn::Square, and armnn::TanH.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsAdditionSupported()
Definition at line 615 of file RefLayerSupport.cpp.
620 bool supported =
true;
622 std::array<DataType,7> supportedTypes = {
631 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
632 "Reference addition: input 0 is not a supported type.");
634 supported &=
CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
635 "Reference addition: input 1 is not a supported type.");
637 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
638 "Reference addition: output is not a supported type.");
640 supported &=
CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
641 "Reference addition: input 0 and Input 1 types are mismatched");
643 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
644 "Reference addition: input and output types are mismatched");
646 supported &=
CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
647 "Reference addition: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsArgMinMaxSupported()
Definition at line 652 of file RefLayerSupport.cpp.
658 std::array<DataType, 8> supportedInputTypes =
669 std::array<DataType,2> supportedOutputTypes = {
674 bool supported =
true;
676 supported &=
CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
677 "Reference ArgMinMax: input is not a supported type.");
678 supported &=
CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
679 "Reference ArgMinMax: output type not supported");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::Signed32, and armnn::Signed64.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsBatchMatMulSupported()
Definition at line 684 of file RefLayerSupport.cpp.
692 std::array<DataType, 6> supportedTypes =
701 bool supported =
true;
703 supported &=
CheckSupportRule(TypeAnyOf(inputX, supportedTypes), reasonIfUnsupported,
704 "Reference batch matrix multiplication: input X is not a supported type");
706 supported &=
CheckSupportRule(TypeAnyOf(inputY, supportedTypes), reasonIfUnsupported,
707 "Reference batch matrix multiplication: input Y is not a supported type");
709 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
710 "Reference batch matrix multiplication: output is not a supported type");
712 supported &=
CheckSupportRule(TypesAreEqual(inputX, inputY), reasonIfUnsupported,
713 "Reference batch matrix multiplication: input X and input Y types are mismatched");
715 supported &=
CheckSupportRule(TypesAreEqual(inputX, output), reasonIfUnsupported,
716 "Reference batch matrix multiplication: inputs and output types are mismatched");
718 supported &=
CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputX, 2),
720 "Reference batch matrix multiplication: input X is not of rank 2 or greater");
722 supported &=
CheckSupportRule(TensorNumDimensionsAreGreaterOrEqualTo(inputY, 2),
724 "Reference batch matrix multiplication: input Y is not of rank 2 or greater");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsBatchNormalizationSupported()
Definition at line 729 of file RefLayerSupport.cpp.
740 std::array<DataType, 6> supportedTypes =
749 bool supported =
true;
751 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
752 "Reference batch normalization: input is not a supported type.");
754 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
755 "Reference batch normalization: output is not a supported type.");
757 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
758 "Reference batch normalization: input and output types are mismatched");
760 supported &=
CheckSupportRule(TypeAnyOf(mean, supportedTypes), reasonIfUnsupported,
761 "Reference batch normalization: mean is not a supported type.");
763 supported &=
CheckSupportRule(TypeAnyOf(variance, supportedTypes), reasonIfUnsupported,
764 "Reference batch normalization: variance is not a supported type.");
766 supported &=
CheckSupportRule(TypeAnyOf(beta, supportedTypes), reasonIfUnsupported,
767 "Reference batch normalization: beta is not a supported type.");
769 supported &=
CheckSupportRule(TypeAnyOf(gamma, supportedTypes), reasonIfUnsupported,
770 "Reference batch normalization: gamma is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsBatchToSpaceNdSupported()
Definition at line 775 of file RefLayerSupport.cpp.
782 bool supported =
true;
784 std::string batchToSpaceNdLayerStr =
"batchToSpaceNd";
785 std::string inputTensorStr =
"input";
786 std::string outputTensorStr =
"output";
789 std::array<DataType,6> supportedTypes =
798 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
799 "Reference BatchToSpaceNd: input type not supported.");
801 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
802 "Reference BatchToSpaceNd: output type not supported.");
804 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
805 "Reference BatchToSpaceNd: input and output types mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsCastSupported()
Definition at line 810 of file RefLayerSupport.cpp.
814 std::array<DataType, 9> supportedInputTypes =
825 bool supported =
true;
826 supported &=
CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
827 "Reference cast: input is not a supported type");
830 supported &=
CheckSupportRule(TypeAnyOf(output, supportedInputTypes), reasonIfUnsupported,
831 "Reference cast: output is not a supported type");
833 supported &=
CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
834 "Reference cast: input and output shapes have different number of total elements");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsChannelShuffleSupported()
Definition at line 839 of file RefLayerSupport.cpp.
845 bool supported =
true;
848 std::array<DataType, 7> supportedTypes =
858 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
859 "Reference ChannelShuffle: input is not a supported type.");
861 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
862 "Reference ChannelShuffle: output is not a supported type.");
864 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
865 "Reference ChannelShuffle: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsComparisonSupported()
Definition at line 871 of file RefLayerSupport.cpp.
878 std::array<DataType, 8> supportedInputTypes =
889 bool supported =
true;
890 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedInputTypes), reasonIfUnsupported,
891 "Reference comparison: input 0 is not a supported type");
893 supported &=
CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
894 "Reference comparison: input 0 and Input 1 types are mismatched");
897 "Reference comparison: output is not of type Boolean");
References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsConcatSupported()
Definition at line 902 of file RefLayerSupport.cpp.
909 bool supported =
true;
910 std::array<DataType,7> supportedTypes =
920 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
921 "Reference concatenation: output type not supported");
922 for (
const TensorInfo* input : inputs)
925 supported &=
CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
926 "Reference concatenation: input type not supported");
928 supported &=
CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
929 "Reference concatenation: input and output types mismatched.");
References ARMNN_ASSERT, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsConstantSupported()
Definition at line 935 of file RefLayerSupport.cpp.
938 std::array<DataType,8> supportedTypes =
949 return CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
950 "Reference constant: output is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsConvertFp16ToFp32Supported()
◆ IsConvertFp32ToFp16Supported()
◆ IsConvolution2dSupported()
Definition at line 993 of file RefLayerSupport.cpp.
1000 bool supported =
true;
1003 std::array<DataType,7> supportedTypes =
1013 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1014 "Reference Convolution2d: input is not a supported type.");
1016 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1017 "Reference Convolution2d: output is not a supported type.");
1019 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1020 "Reference Convolution2d: input and output types mismatched.");
1023 const DataType inputType = input.GetDataType();
1026 std::array<DataType, 3> supportedWeightTypes =
1033 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1034 "Reference Convolution2d: weights type not supported for quantized input.");
1038 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1039 "Reference Convolution2d: weights is not a supported type.");
1041 supported &=
CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1042 "Reference Convolution2d: input and weights types mismatched.");
1045 if (biases.has_value())
1047 std::array<DataType,4> biasesSupportedTypes =
1054 supported &=
CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1055 "Reference Convolution2d: biases is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsConvolution3dSupported()
Definition at line 1062 of file RefLayerSupport.cpp.
1069 bool supported =
true;
1072 std::array<DataType,7> supportedTypes =
1082 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1083 "Reference Convolution3d: input is not a supported type.");
1085 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1086 "Reference Convolution3d: output is not a supported type.");
1088 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1089 "Reference Convolution3d: input and output types mismatched.");
1091 const DataType inputType = input.GetDataType();
1094 std::array<DataType, 3> supportedWeightTypes =
1101 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1102 "Reference Convolution3d: weights type not supported for quantized input.");
1106 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1107 "Reference Convolution3d: weights is not a supported type.");
1109 supported &=
CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1110 "Reference Convolution3d: input and weights types mismatched.");
1113 if (biases.has_value())
1115 std::array<DataType,4> biasesSupportedTypes =
1122 supported &=
CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1123 "Reference Convolution3d: biases is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDebugSupported()
Definition at line 1130 of file RefLayerSupport.cpp.
1134 bool supported =
true;
1136 std::array<DataType, 8> supportedTypes =
1148 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1149 "Reference for Debug layer: input type not supported");
1151 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1152 "Reference for Debug layer: output type not supported");
1154 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1155 "Reference for Debug layer: input and output types are mismatched");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDepthToSpaceSupported()
Definition at line 1160 of file RefLayerSupport.cpp.
1166 bool supported =
true;
1168 std::array<DataType,6> supportedTypes =
1177 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1178 "Reference DepthToSpace: input type not supported");
1180 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1181 "Reference DepthToSpace: output type not supported");
1183 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1184 "Reference DepthToSpace: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDepthwiseConvolutionSupported()
Definition at line 1189 of file RefLayerSupport.cpp.
1197 bool supported =
true;
1200 std::array<DataType,7> supportedTypes =
1210 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1211 "Reference DepthwiseConvolution2d: input is not a supported type.");
1213 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1214 "Reference DepthwiseConvolution2d: output is not a supported type.");
1216 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1217 "Reference DepthwiseConvolution2d: input and output types mismatched.");
1219 const DataType inputType = input.GetDataType();
1222 std::array<DataType, 3> supportedWeightTypes =
1229 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
1230 "Reference DepthwiseConvolution2d: weights type not supported for "
1231 "quantized input.");
1235 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1236 "Reference DepthwiseConvolution2d: weights is not a supported type.");
1238 supported &=
CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1239 "Reference DepthwiseConvolution2d: input and weights types mismatched.");
1242 if (biases.has_value())
1244 std::array<DataType,4> biasesSupportedTypes =
1250 supported &=
CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
1251 "Reference DepthwiseConvolution2d: biases is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsDilatedDepthwiseConvolutionSupported(), and RefLayerSupport::IsLayerSupported().
◆ IsDequantizeSupported()
Definition at line 1258 of file RefLayerSupport.cpp.
1262 bool supported =
true;
1264 std::array<DataType,5> supportedInputTypes = {
1272 supported &=
CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
1273 "Reference for Dequantize layer: input type not supported.");
1275 supported &=
CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
1276 "Reference for Dequantize layer: per-axis quantized input not supported.");
1278 std::array<DataType,3> supportedOutputTypes = {
1283 supported &=
CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
1284 "Reference for Dequantize layer: output type not supported.");
1286 supported &=
CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1287 "Reference for Dequantize layer: input/output shapes have different num total "
1288 "elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDetectionPostProcessSupported()
Definition at line 1293 of file RefLayerSupport.cpp.
1303 IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
1305 bool supported =
true;
1307 std::array<DataType,6> supportedInputTypes =
1316 supported &=
CheckSupportRule(TypeAnyOf(boxEncodings, supportedInputTypes), reasonIfUnsupported,
1317 "Reference DetectionPostProcess: input 0 is not a supported type.");
1319 supported &=
CheckSupportRule(TypeAnyOf(scores, supportedInputTypes), reasonIfUnsupported,
1320 "Reference DetectionPostProcess: input 1 is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsDilatedDepthwiseConvolutionSupported()
◆ IsDivisionSupported()
Definition at line 1335 of file RefLayerSupport.cpp.
1340 bool supported =
true;
1342 std::array<DataType,7> supportedTypes = {
1351 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1352 "Reference division: input 0 is not a supported type.");
1354 supported &=
CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1355 "Reference division: input 1 is not a supported type.");
1357 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1358 "Reference division: output is not a supported type.");
1360 supported &=
CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1361 "Reference division: input 0 and Input 1 types are mismatched");
1363 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1364 "Reference division: input and output types are mismatched");
1366 supported &=
CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1367 "Reference division: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsElementwiseUnarySupported()
Definition at line 1372 of file RefLayerSupport.cpp.
1379 std::array<DataType, 7> supportedTypes =
1389 std::array<DataType, 1> logicalSupportedTypes =
1394 bool supported =
true;
1398 supported &=
CheckSupportRule(TypeAnyOf(input, logicalSupportedTypes), reasonIfUnsupported,
1399 "Reference elementwise unary: input type not supported");
1401 supported &=
CheckSupportRule(TypeAnyOf(output, logicalSupportedTypes), reasonIfUnsupported,
1402 "Reference elementwise unary: output type not supported");
1406 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1407 "Reference elementwise unary: input type not supported");
1409 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1410 "Reference elementwise unary: output type not supported");
1413 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1414 "Reference elementwise unary: input and output types not matching");
1416 supported &=
CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1417 "Reference elementwise unary: input and output shapes"
1418 "have different number of total elements");
References armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::LogicalNot, ElementwiseUnaryDescriptor::m_Operation, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsFakeQuantizationSupported()
◆ IsFillSupported()
◆ IsFloorSupported()
Definition at line 1466 of file RefLayerSupport.cpp.
1471 bool supported =
true;
1473 std::array<DataType,3> supportedTypes =
1479 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1480 "Reference Floor: input type not supported.");
1482 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1483 "Reference Floor: output type not supported.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsFullyConnectedSupported()
Definition at line 1488 of file RefLayerSupport.cpp.
1495 bool supported =
true;
1498 std::array<DataType,6> supportedTypes =
1507 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1508 "Reference Fully Connected: input type not supported.");
1510 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1511 "Reference Fully Connected: output type not supported.");
1513 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1514 "Reference Fully Connected: weights type not supported.");
1516 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1517 "Reference Fully Connected: input and output types mismatched.");
1519 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
1520 "Reference Fully Connected: weights is not a supported type.");
1522 supported &=
CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
1523 "Reference Fully Connected: input and weights types mismatched.");
1525 if (descriptor.m_BiasEnabled)
1528 std::array<DataType, 5>
1529 supportedBiasTypes =
1537 supported &=
CheckSupportRule(TypeAnyOf(biases, supportedBiasTypes), reasonIfUnsupported,
1538 "Reference Fully Connected: bias type not supported.");
1540 supported &=
CheckSupportRule(BiasAndWeightsTypesMatch(biases, weights), reasonIfUnsupported,
1541 "Reference Fully Connected: bias and weight types mismatch.");
1543 supported &=
CheckSupportRule(BiasAndWeightsTypesCompatible(weights, supportedBiasTypes), reasonIfUnsupported,
1544 "Reference Fully Connected: bias type inferred from weights is incompatible.");
1546 supported &=
CheckSupportRule(TensorNumDimensionsAreCorrect(biases, 1U), reasonIfUnsupported,
1547 "Reference Fully Connected: bias must have 1 dimension.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, FullyConnectedDescriptor::m_BiasEnabled, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsGatherNdSupported()
Definition at line 1554 of file RefLayerSupport.cpp.
1559 bool supported =
true;
1560 std::array<DataType,7> supportedTypes =
1570 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1571 "Reference GatherNd: input type not supported");
1573 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1574 "Reference GatherNd: output type not supported");
1577 "Reference GatherNd: indices (input1) type not supported");
1579 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1580 "Reference GatherNd: input and output types not matching");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsGatherSupported()
Definition at line 1585 of file RefLayerSupport.cpp.
1591 bool supported =
true;
1592 std::array<DataType,7> supportedTypes =
1603 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1604 "Reference Gather: input type not supported");
1606 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1607 "Reference Gather: output type not supported");
1610 "Reference Gather: indices (input1) type not supported");
1612 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1613 "Reference Gather: input and output types not matching");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsInputSupported()
◆ IsInstanceNormalizationSupported()
Definition at line 1624 of file RefLayerSupport.cpp.
1631 std::array<DataType, 3> supportedTypes =
1637 bool supported =
true;
1639 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1640 "Reference Instance Normalization: input type not supported.");
1642 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1643 "Reference Instance Normalization: output type not supported.");
1645 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1646 "Reference Instance Normalization: input and output types mismatched.");
1648 supported &=
CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1649 "Reference Instance Normalization: input and output shapes have different "
1650 "num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsL2NormalizationSupported()
Definition at line 1655 of file RefLayerSupport.cpp.
1662 std::array<DataType, 6> supportedTypes =
1671 bool supported =
true;
1673 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1674 "Reference L2normalization: input type not supported.");
1676 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1677 "Reference L2normalization: output type not supported.");
1679 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1680 "Reference L2normalization: input and output types mismatched.");
1682 supported &=
CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
1683 "Reference L2normalization: input and output shapes have different "
1684 "num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsLayerSupported()
Default implementation of the ILayerSupport interface, Backends should implement this as a switch statement for each of their LayerTypes calling their specific backend implementation of IsXXXLayerSupported.
Reimplemented from LayerSupportBase.
Definition at line 61 of file RefLayerSupport.cpp.
73 *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
80 *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
86 *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
95 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
101 *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
102 reasonIfUnsupported);
107 *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
108 reasonIfUnsupported);
111 std::vector<const TensorInfo*> inputInfos;
112 for (uint32_t i = 0; i < (infos.size() - 1); i++)
114 inputInfos.push_back(&infos[i]);
117 infos[infos.size() - 1],
118 *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
119 reasonIfUnsupported);
129 if (infos.size() != 4)
131 throw InvalidArgumentException(
"Invalid number of Convolution2d TensorInfos. "
132 "TensorInfos should be of format: {input, output, weights, biases}.");
135 auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
136 if (infos[3] == TensorInfo())
143 reasonIfUnsupported);
152 reasonIfUnsupported);
158 *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
159 reasonIfUnsupported);
162 if (infos.size() != 4)
164 throw InvalidArgumentException(
"Invalid number of DepthwiseConvolution2d TensorInfos. "
165 "TensorInfos should be of format: {input, output, weights, biases}.");
168 auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
169 if (infos[3] == TensorInfo())
176 reasonIfUnsupported);
185 reasonIfUnsupported);
194 std::array<DataType, 7> supportedTypes =
204 bool supported =
true;
205 supported &=
CheckSupportRule(TypeAnyOf(infos[0], supportedTypes), reasonIfUnsupported,
206 "Reference elementwise unary: input type not supported");
208 supported &=
CheckSupportRule(TypeAnyOf(infos[1], supportedTypes), reasonIfUnsupported,
209 "Reference elementwise unary: input type not supported");
211 supported &=
CheckSupportRule(TypeAnyOf(infos[2], supportedTypes), reasonIfUnsupported,
212 "Reference elementwise unary: output type not supported");
214 supported &=
CheckSupportRule(TypesAreEqual(infos[0], infos[1]), reasonIfUnsupported,
215 "Reference elementwise unary: input types not matching");
217 supported &=
CheckSupportRule(TypesAreEqual(infos[0], infos[2]), reasonIfUnsupported,
218 "Reference elementwise unary: input and output types not matching");
225 *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
226 reasonIfUnsupported);
230 *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
231 reasonIfUnsupported);
239 *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
240 reasonIfUnsupported);
245 *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
246 reasonIfUnsupported);
251 reasonIfUnsupported);
257 *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
259 reasonIfUnsupported);
263 *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
264 reasonIfUnsupported);
269 *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
270 reasonIfUnsupported);
274 *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
275 reasonIfUnsupported);
284 *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
285 lstmParamsInfo.value(),
286 reasonIfUnsupported);
294 *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
295 lstmParamsInfo.value(),
296 reasonIfUnsupported);
302 *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
303 reasonIfUnsupported);
311 *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
312 reasonIfUnsupported);
318 *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
319 reasonIfUnsupported);
323 *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
324 reasonIfUnsupported);
328 *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
329 reasonIfUnsupported);
337 *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
338 reasonIfUnsupported);
342 *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
343 reasonIfUnsupported);
348 reasonIfUnsupported);
352 *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
353 reasonIfUnsupported);
357 *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
358 reasonIfUnsupported);
362 *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
363 reasonIfUnsupported);
367 *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
368 reasonIfUnsupported);
372 *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
373 reasonIfUnsupported);
376 std::vector<TensorInfo> outputInfos;
377 for (uint32_t i = 1; i < infos.size(); i++)
379 outputInfos.push_back(infos[i]);
382 {outputInfos.begin(), outputInfos.end()},
383 *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
384 reasonIfUnsupported);
388 std::vector<const TensorInfo*> inputInfos;
389 for (uint32_t i = 0; i < infos.size() - 1; i++)
391 inputInfos.push_back(&infos[i]);
394 infos[infos.size() - 1],
395 *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
396 reasonIfUnsupported);
401 *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
402 reasonIfUnsupported);
408 *(PolymorphicDowncast<const TileDescriptor*>(&descriptor)),
409 reasonIfUnsupported);
413 *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
414 reasonIfUnsupported);
417 if (infos.size() != 4)
419 throw InvalidArgumentException(
"Invalid number of TransposeConvolution2d TensorInfos. "
420 "TensorInfos should be of format: {input, output, weights, biases}.");
423 auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
424 if (infos[3] == TensorInfo())
431 reasonIfUnsupported);
440 reasonIfUnsupported);
448 *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
449 reasonIfUnsupported);
452 if (infos.size() != 4)
454 throw InvalidArgumentException(
"Invalid number of Convolution3d TensorInfos. "
455 "TensorInfos should be of format: {input, output, weights, biases}.");
458 auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
459 if (infos[3] == TensorInfo())
466 reasonIfUnsupported);
475 reasonIfUnsupported);
488 *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
490 reasonIfUnsupported);
493 *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
494 reasonIfUnsupported);
503 if (infos.size() != 6)
505 throw InvalidArgumentException(
"Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
506 "should be of format: {input, outputStateIn, cellStateIn, "
507 "hiddenStateOutputVal, cellStateOutputVal, output}");
509 auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
517 lstmParamsInfo.value(),
518 reasonIfUnsupported);
523 *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
524 reasonIfUnsupported);
539 quantizedLstmInputParamsInfo.value(),
540 reasonIfUnsupported);
References armnn::Activation, armnn::Addition, armnn::ArgMinMax, armnn::BatchMatMul, armnn::BatchNormalization, armnn::BatchToSpaceNd, armnn::Cast, armnn::ChannelShuffle, armnn::CheckSupportRule(), armnn::Comparison, armnn::Concat, armnn::Constant, armnn::ConvertFp16ToFp32, armnn::ConvertFp32ToFp16, armnn::Convolution2d, armnn::Convolution3d, armnn::Debug, armnn::DepthToSpace, armnn::DepthwiseConvolution2d, armnn::Dequantize, armnn::DetectionPostProcess, armnn::Division, armnn::ElementwiseBinary, armnn::ElementwiseUnary, armnn::FakeQuantization, armnn::Fill, armnn::Float16, armnn::Float32, armnn::Floor, armnn::FullyConnected, armnn::Gather, armnn::GatherNd, armnn::Input, armnn::InstanceNormalization, RefLayerSupport::IsActivationSupported(), RefLayerSupport::IsAdditionSupported(), RefLayerSupport::IsArgMinMaxSupported(), RefLayerSupport::IsBatchMatMulSupported(), RefLayerSupport::IsBatchNormalizationSupported(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsCastSupported(), RefLayerSupport::IsChannelShuffleSupported(), RefLayerSupport::IsComparisonSupported(), RefLayerSupport::IsConcatSupported(), RefLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDebugSupported(), RefLayerSupport::IsDepthToSpaceSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), RefLayerSupport::IsDequantizeSupported(), RefLayerSupport::IsDetectionPostProcessSupported(), RefLayerSupport::IsDivisionSupported(), RefLayerSupport::IsElementwiseUnarySupported(), RefLayerSupport::IsFakeQuantizationSupported(), RefLayerSupport::IsFillSupported(), RefLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), RefLayerSupport::IsGatherNdSupported(), RefLayerSupport::IsGatherSupported(), RefLayerSupport::IsInputSupported(), 
RefLayerSupport::IsInstanceNormalizationSupported(), RefLayerSupport::IsL2NormalizationSupported(), RefLayerSupport::IsLogicalBinarySupported(), RefLayerSupport::IsLogSoftmaxSupported(), RefLayerSupport::IsLstmSupported(), RefLayerSupport::IsMaximumSupported(), RefLayerSupport::IsMeanSupported(), RefLayerSupport::IsMemCopySupported(), LayerSupportBase::IsMemImportSupported(), LayerSupportBase::IsMergeSupported(), RefLayerSupport::IsMinimumSupported(), RefLayerSupport::IsMultiplicationSupported(), RefLayerSupport::IsNormalizationSupported(), RefLayerSupport::IsOutputSupported(), RefLayerSupport::IsPadSupported(), RefLayerSupport::IsPermuteSupported(), RefLayerSupport::IsPooling2dSupported(), RefLayerSupport::IsPooling3dSupported(), RefLayerSupport::IsPreluSupported(), RefLayerSupport::IsQLstmSupported(), LayerSupportBase::IsQuantizedLstmSupported(), RefLayerSupport::IsQuantizeSupported(), RefLayerSupport::IsRankSupported(), RefLayerSupport::IsReduceSupported(), RefLayerSupport::IsReshapeSupported(), RefLayerSupport::IsResizeSupported(), RefLayerSupport::IsReverseV2Supported(), RefLayerSupport::IsShapeSupported(), RefLayerSupport::IsSliceSupported(), RefLayerSupport::IsSoftmaxSupported(), RefLayerSupport::IsSpaceToBatchNdSupported(), RefLayerSupport::IsSpaceToDepthSupported(), RefLayerSupport::IsSplitterSupported(), RefLayerSupport::IsStackSupported(), RefLayerSupport::IsStridedSliceSupported(), RefLayerSupport::IsSubtractionSupported(), RefLayerSupport::IsTileSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), RefLayerSupport::IsTransposeSupported(), RefLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::L2Normalization, armnn::LogicalBinary, armnn::LogSoftmax, armnn::Lstm, armnn::Map, armnn::Maximum, armnn::Mean, armnn::MemCopy, armnn::MemImport, armnn::Merge, armnn::Minimum, armnn::Multiplication, armnn::Normalization, armnn::Output, armnn::Pad, armnn::Permute, armnn::Pooling2d, armnn::Pooling3d, armnn::Prelu, armnn::QAsymmS8, 
armnn::QAsymmU8, armnn::QLstm, armnn::QSymmS16, armnn::Quantize, armnn::QuantizedLstm, armnn::Rank, armnn::Reduce, armnn::Reshape, armnn::Resize, armnn::ReverseV2, armnn::Shape, armnn::Signed32, armnn::Slice, armnn::Softmax, armnn::SpaceToBatchNd, armnn::SpaceToDepth, armnn::Splitter, armnn::Stack, armnn::StridedSlice, armnn::Subtraction, armnn::Tile, armnn::Transpose, armnn::TransposeConvolution2d, armnn::UnidirectionalSequenceLstm, armnn::Unmap, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
◆ IsLogicalBinarySupported()
Definition at line 1689 of file RefLayerSupport.cpp.
1697 std::array<DataType, 1> supportedTypes =
1702 bool supported =
true;
1703 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1704 "Reference LogicalBinary: input 0 type not supported");
1705 supported &=
CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1706 "Reference LogicalBinary: input 1 type not supported");
1708 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1709 "Reference LogicalBinary: input and output types do not match");
References armnn::Boolean, armnn::CheckSupportRule(), and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsLogSoftmaxSupported()
Definition at line 1714 of file RefLayerSupport.cpp.
1721 std::array<DataType, 3> supportedTypes =
1727 bool supported =
true;
1728 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1729 "Reference LogSoftmax: input type not supported");
1731 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1732 "Reference LogSoftmax: output type not supported");
1734 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1735 "Reference LogSoftmax: input and output types do not match");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, and armnn::IgnoreUnused().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsLstmSupported()
Definition at line 1740 of file RefLayerSupport.cpp.
1754 bool supported =
true;
1756 std::array<DataType,3> supportedTypes = {
1762 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1763 "Reference Lstm: input is not a supported type.");
1764 supported &=
CheckSupportRule(TypesAreEqual(input, outputStateIn), reasonIfUnsupported,
1765 "Reference Lstm: input and outputStateIn types are mismatched");
1766 supported &=
CheckSupportRule(TypesAreEqual(input, cellStateIn), reasonIfUnsupported,
1767 "Reference Lstm: input and cellStateIn types are mismatched");
1768 supported &=
CheckSupportRule(TypesAreEqual(input, scratchBuffer), reasonIfUnsupported,
1769 "Reference Lstm: input and scratchBuffer types are mismatched");
1770 supported &=
CheckSupportRule(TypesAreEqual(input, outputStateOut), reasonIfUnsupported,
1771 "Reference Lstm: input and outputStateOut types are mismatched");
1772 supported &=
CheckSupportRule(TypesAreEqual(input, cellStateOut), reasonIfUnsupported,
1773 "Reference Lstm: input and cellStateOut types are mismatched");
1775 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1776 "Reference Lstm: input and output types are mismatched");
1778 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToForgetWeights()), reasonIfUnsupported,
1779 "Reference Lstm: input and InputToForgetWeights types are mismatched");
1780 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToCellWeights()), reasonIfUnsupported,
1781 "Reference Lstm: input and InputToCellWeights types are mismatched");
1782 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToOutputWeights()), reasonIfUnsupported,
1783 "Reference Lstm: input and InputToOutputWeights types are mismatched");
1784 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToForgetWeights()), reasonIfUnsupported,
1785 "Reference Lstm: input and RecurrentToForgetWeights types are mismatched");
1786 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToCellWeights()), reasonIfUnsupported,
1787 "Reference Lstm: input and RecurrentToCellWeights types are mismatched");
1788 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToOutputWeights()), reasonIfUnsupported,
1789 "Reference Lstm: input and RecurrentToOutputWeights types are mismatched");
1790 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetGateBias()), reasonIfUnsupported,
1791 "Reference Lstm: input and ForgetGateBias types are mismatched");
1792 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellBias()), reasonIfUnsupported,
1793 "Reference Lstm: input and CellBias types are mismatched");
1794 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputGateBias()), reasonIfUnsupported,
1795 "Reference Lstm: input and OutputGateBias types are mismatched");
1796 if (!descriptor.m_CifgEnabled)
1798 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputToInputWeights()), reasonIfUnsupported,
1799 "Reference Lstm: input and InputToInputWeights types are mismatched");
1800 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetRecurrentToInputWeights()),
1801 reasonIfUnsupported,
1802 "Reference Lstm: input and RecurrentToInputWeights types are mismatched");
1803 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputGateBias()), reasonIfUnsupported,
1804 "Reference Lstm: input and InputGateBias types are mismatched");
1805 if (descriptor.m_PeepholeEnabled)
1807 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToInputWeights()),
1808 reasonIfUnsupported,
1809 "Reference Lstm: input and CellToInputWeights types are mismatched");
1812 if (descriptor.m_PeepholeEnabled)
1814 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToForgetWeights()), reasonIfUnsupported,
1815 "Reference Lstm: input and CellToForgetWeights types are mismatched");
1816 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellToOutputWeights()), reasonIfUnsupported,
1817 "Reference Lstm: input and CellToOutputWeights types are mismatched");
1819 if (descriptor.m_ProjectionEnabled)
1821 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionWeights()), reasonIfUnsupported,
1822 "Reference Lstm: input and mProjectionWeights types are mismatched");
1823 if (paramsInfo.m_ProjectionBias !=
nullptr)
1825 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
1826 "Reference Lstm: input and ProjectionBias types are mismatched");
1829 if (descriptor.m_LayerNormEnabled)
1831 if (!descriptor.m_CifgEnabled)
1833 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetInputLayerNormWeights()),
1834 reasonIfUnsupported,
1835 "Reference Lstm: input and InputLayerNormWeights types are mismatched");
1837 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetForgetLayerNormWeights()),
1838 reasonIfUnsupported,
1839 "Reference Lstm: input and ForgetLayerNormWeights types are mismatched");
1840 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetCellLayerNormWeights()),
1841 reasonIfUnsupported,
1842 "Reference Lstm: input and CellLayerNormWeights types are mismatched");
1843 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetOutputLayerNormWeights()),
1844 reasonIfUnsupported,
1845 "Reference Lstm: input and OutputLayerNormWeights types are mismatched");
References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMaximumSupported()
Definition at line 1851 of file RefLayerSupport.cpp.
1856 bool supported =
true;
1858 std::array<DataType,7> supportedTypes = {
1867 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1868 "Reference maximum: input 0 is not a supported type.");
1870 supported &=
CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1871 "Reference maximum: input 1 is not a supported type.");
1873 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1874 "Reference maximum: output is not a supported type.");
1876 supported &=
CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
1877 "Reference maximum: input 0 and Input 1 types are mismatched");
1879 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
1880 "Reference maximum: input and output types are mismatched");
1882 supported &=
CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
1883 "Reference maximum: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMeanSupported()
Definition at line 1888 of file RefLayerSupport.cpp.
1893 bool supported =
true;
1894 std::string meanLayerStr =
"Mean";
1895 std::string outputTensorStr =
"output";
1897 std::array<DataType,6> supportedTypes =
1906 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1907 "Reference Mean: input type not supported.");
1909 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1910 "Reference Mean: input and output types are mismatched");
1912 if (descriptor.m_KeepDims)
1914 supported &=
CheckSupportRule(TensorNumDimensionsAreCorrect(output, input.GetNumDimensions()),
1915 reasonIfUnsupported,
1916 CreateIncorrectDimensionsErrorMsg(input.GetNumDimensions(),
1917 output.GetNumDimensions(),
1918 meanLayerStr, outputTensorStr).data());
1920 else if (descriptor.m_Axis.empty())
1923 reasonIfUnsupported,
1924 CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1925 meanLayerStr, outputTensorStr).data());
1929 auto outputDim = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(descriptor.m_Axis.size());
1933 supported &=
CheckSupportRule(TensorNumDimensionsAreCorrect(output, outputDim),
1934 reasonIfUnsupported,
1935 CreateIncorrectDimensionsErrorMsg(outputDim, output.GetNumDimensions(),
1936 meanLayerStr, outputTensorStr).data());
1941 reasonIfUnsupported,
1942 CreateIncorrectDimensionsErrorMsg(1, output.GetNumDimensions(),
1943 meanLayerStr, outputTensorStr).data());
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetNumDimensions(), MeanDescriptor::m_Axis, MeanDescriptor::m_KeepDims, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMemCopySupported()
Definition at line 1950 of file RefLayerSupport.cpp.
1954 bool supported =
true;
1956 std::array<DataType,7> supportedTypes =
1967 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
1968 "Reference MemCopy: input type not supported");
1970 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
1971 "Reference MemCopy: output type not supported");
1973 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
1974 "Reference MemCopy: input and output types are mismatched");
References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMinimumSupported()
Definition at line 1979 of file RefLayerSupport.cpp.
1984 bool supported =
true;
1986 std::array<DataType,7> supportedTypes = {
1995 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
1996 "Reference minimum: input 0 is not a supported type.");
1998 supported &=
CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
1999 "Reference minimum: input 1 is not a supported type.");
2001 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2002 "Reference minimum: output is not a supported type.");
2004 supported &=
CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2005 "Reference minimum: input 0 and Input 1 types are mismatched");
2007 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2008 "Reference minimum: input and output types are mismatched");
2010 supported &=
CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2011 "Reference minimum: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsMultiplicationSupported()
Definition at line 2016 of file RefLayerSupport.cpp.
2021 bool supported =
true;
2023 std::array<DataType,7> supportedTypes = {
2032 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2033 "Reference multiplication: input 0 is not a supported type.");
2035 supported &=
CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2036 "Reference multiplication: input 1 is not a supported type.");
2038 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2039 "Reference multiplication: output is not a supported type.");
2041 supported &=
CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2042 "Reference multiplication: input 0 and Input 1 types are mismatched");
2044 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2045 "Reference multiplication: input and output types are mismatched");
2047 supported &=
CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2048 "Reference multiplication: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsNormalizationSupported()
Definition at line 2053 of file RefLayerSupport.cpp.
2061 std::array<DataType, 6> supportedTypes =
2070 bool supported =
true;
2072 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2073 "Reference normalization: input type not supported.");
2075 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2076 "Reference normalization: output type not supported.");
2078 supported &=
CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2079 "Reference normalization: input and output shapes have different "
2080 "num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsOutputSupported()
◆ IsPadSupported()
Definition at line 2091 of file RefLayerSupport.cpp.
2097 bool supported =
true;
2100 std::array<DataType,6> supportedTypes =
2109 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2110 "Reference pad: input is not a supported type.");
2112 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2113 "Reference pad: output is not a supported type.");
2115 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2116 "Reference pad: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPermuteSupported()
Definition at line 2121 of file RefLayerSupport.cpp.
2127 bool supported =
true;
2130 std::array<DataType, 6> supportedTypes =
2140 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2141 "Reference permute: input is not a supported type.");
2143 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2144 "Reference permute: output is not a supported type.");
2146 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2147 "Reference permute: input and output types are mismatched.");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPooling2dSupported()
Definition at line 2152 of file RefLayerSupport.cpp.
2158 bool supported =
true;
2161 std::array<DataType,6> supportedTypes =
2170 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2171 "Reference pooling2d: input is not a supported type.");
2173 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2174 "Reference pooling2d: output is not a supported type.");
2176 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2177 "Reference pooling2d: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPooling3dSupported()
Definition at line 2182 of file RefLayerSupport.cpp.
2188 bool supported =
true;
2191 std::array<DataType,6> supportedTypes =
2200 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2201 "Reference pooling3d: input is not a supported type.");
2203 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2204 "Reference pooling3d: output is not a supported type.");
2206 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2207 "Reference pooling3d: input and output types are mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsPreluSupported()
Definition at line 2670 of file RefLayerSupport.cpp.
2675 bool supported =
true;
2677 std::array<DataType, 6> supportedTypes
2686 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2687 "PReLU: input is not a supported type.");
2689 supported &=
CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported,
2690 "PReLU: alpha is not a supported type.");
2692 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2693 "PReLU: output is not a supported type.");
2695 supported &=
CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported,
2696 "PReLU: input, alpha and output types are mismatched");
2698 supported &=
CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported,
2699 "PReLU: shapes are not suitable for implicit broadcast");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsQLstmSupported()
◆ IsQuantizeSupported()
Definition at line 2237 of file RefLayerSupport.cpp.
2241 bool supported =
true;
2244 std::array<DataType,7> supportedInputTypes = {
2253 supported &=
CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
2254 "Reference quantize: input type not supported.");
2257 std::array<DataType,4> supportedOutputTypes = {
2263 supported &=
CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
2264 "Reference quantize: output type not supported.");
2266 supported &=
CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
2267 "Reference quantize: input and output shapes have different num total elements.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsRankSupported()
◆ IsReduceSupported()
Definition at line 2287 of file RefLayerSupport.cpp.
2293 bool supported =
true;
2294 std::array<DataType,7> supportedTypes =
2304 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2305 "Reference Reduce: input type not supported");
2307 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2308 "Reference Reduce: output type not supported");
2310 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2311 "Reference Reduce: input and output types not matching");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsReshapeSupported()
Definition at line 2316 of file RefLayerSupport.cpp.
2324 std::array<DataType,8> supportedOutputTypes =
2336 return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
2337 "Reference reshape: input type not supported.");
References armnn::BFloat16, armnn::Boolean, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsResizeSupported()
Definition at line 2340 of file RefLayerSupport.cpp.
2346 bool supported =
true;
2347 std::array<DataType,6> supportedTypes =
2357 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2358 "Reference Resize: input type not supported");
2360 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2361 "Reference Resize: output type not supported");
2363 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2364 "Reference Resize: input and output types not matching");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsReverseV2Supported()
Definition at line 2369 of file RefLayerSupport.cpp.
2374 bool supported =
true;
2376 std::array<DataType,8> supportedTypes =
2388 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2389 "Reference ReverseV2: input0 type not supported");
2391 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2392 "Reference ReverseV2: output type not supported");
2394 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2395 "Reference ReverseV2: input0 and output types not matching");
2397 std::array<DataType,6> input2SupportedTypes =
2402 supported &=
CheckSupportRule(TypeAnyOf(input1, input2SupportedTypes), reasonIfUnsupported,
2403 "Reference ReverseV2: input1 type not supported");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsShapeSupported()
◆ IsSliceSupported()
Definition at line 2426 of file RefLayerSupport.cpp.
2432 bool supported =
true;
2434 std::array<DataType, 5> supportedTypes =
2443 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2444 "Reference Slice: input type not supported");
2446 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2447 "Reference Slice: output type not supported");
2449 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2450 "Reference Slice: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSoftmaxSupported()
Definition at line 2455 of file RefLayerSupport.cpp.
2461 bool supported =
true;
2462 std::array<DataType,7> supportedTypes =
2472 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2473 "Reference Softmax: input type not supported");
2475 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2476 "Reference Softmax: output type not supported");
2478 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2479 "Reference Softmax: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSpaceToBatchNdSupported()
Definition at line 2484 of file RefLayerSupport.cpp.
2490 bool supported =
true;
2491 std::array<DataType,6> supportedTypes =
2500 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2501 "Reference SpaceToBatchNd: input type not supported");
2503 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2504 "Reference SpaceToBatchNd: output type not supported");
2506 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2507 "Reference SpaceToBatchNd: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSpaceToDepthSupported()
Definition at line 2512 of file RefLayerSupport.cpp.
2519 bool supported =
true;
2521 std::array<DataType,6> supportedTypes =
2530 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2531 "Reference SpaceToDepth: input type not supported");
2533 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2534 "Reference SpaceToDepth: output type not supported");
2536 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2537 "Reference SpaceToDepth: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSplitterSupported()
Definition at line 2542 of file RefLayerSupport.cpp.
2548 bool supported =
true;
2549 std::array<DataType,6> supportedTypes =
2558 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2559 "Reference splitter: output type not supported");
2560 for (
const TensorInfo& output : outputs)
2562 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2563 "Reference splitter: input type not supported");
2565 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2566 "Reference splitter: input and output types mismatched.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsStackSupported()
Definition at line 2572 of file RefLayerSupport.cpp.
2579 bool supported =
true;
2580 std::array<DataType,7> supportedTypes =
2590 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2591 "Reference stack: output type not supported");
2592 for (
const TensorInfo* input : inputs)
2595 supported &=
CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
2596 "Reference stack: input type not supported");
2598 supported &=
CheckSupportRule(TypesAreEqual(*input, output), reasonIfUnsupported,
2599 "Reference stack: input and output types mismatched.");
References ARMNN_ASSERT, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsStridedSliceSupported()
Definition at line 2605 of file RefLayerSupport.cpp.
2611 bool supported =
true;
2613 std::array<DataType,5> supportedTypes =
2621 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2622 "Reference StridedSlice: input type not supported");
2624 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2625 "Reference StridedSlice: output type not supported");
2627 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2628 "Reference StridedSlice: input and output types are mismatched");
References armnn::CheckSupportRule(), armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsSubtractionSupported()
Definition at line 2633 of file RefLayerSupport.cpp.
2638 bool supported =
true;
2640 std::array<DataType,7> supportedTypes = {
2649 supported &=
CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
2650 "Reference subtraction: input 0 is not a supported type.");
2652 supported &=
CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
2653 "Reference subtraction: input 1 is not a supported type.");
2655 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2656 "Reference subtraction: output is not a supported type.");
2658 supported &=
CheckSupportRule(TypesAreEqual(input0, input1), reasonIfUnsupported,
2659 "Reference subtraction: input 0 and Input 1 types are mismatched");
2661 supported &=
CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
2662 "Reference subtraction: input and output types are mismatched");
2664 supported &=
CheckSupportRule(ShapesAreBroadcastCompatible(input0, input1, output), reasonIfUnsupported,
2665 "Reference subtraction: shapes are not suitable for implicit broadcast.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsTileSupported()
Definition at line 2704 of file RefLayerSupport.cpp.
2711 bool supported =
true;
2713 std::array<DataType, 7> supportedTypes
2724 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2725 "Tile: input type not supported.");
2727 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2728 "Tile: output type not supported");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsTransposeConvolution2dSupported()
Definition at line 2733 of file RefLayerSupport.cpp.
2741 bool supported =
true;
2743 std::array<DataType,7> supportedTypes =
2753 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2754 "Reference TransposeConvolution2d: input is not a supported type.");
2756 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2757 "Reference TransposeConvolution2d: output is not a supported type.");
2759 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2760 "Reference TransposeConvolution2d: input and output types mismatched.");
2763 const DataType inputType = input.GetDataType();
2766 std::array<DataType, 3> supportedWeightTypes =
2773 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
2774 "Reference TransposeConvolution2d: weights type not supported for "
2775 "quantized input.");
2779 supported &=
CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
2780 "Reference TransposeConvolution2d: weights is not a supported type.");
2782 supported &=
CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
2783 "Reference TransposeConvolution2d: input and weights types mismatched.");
2786 if (biases.has_value())
2788 std::array<DataType,4> biasesSupportedTypes =
2794 supported &=
CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
2795 "Reference TransposeConvolution2d: biases is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), OptionalBase::has_value(), armnn::IgnoreUnused(), armnn::IsQuantized8BitType(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Signed32, and OptionalReferenceSwitch< std::is_reference< T >::value, T >::value().
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsTransposeSupported()
Definition at line 2801 of file RefLayerSupport.cpp.
2807 bool supported =
true;
2810 std::array<DataType, 6> supportedTypes =
2820 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2821 "Reference transpose: input is not a supported type.");
2823 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2824 "Reference transpose: output is not a supported type.");
2826 supported &=
CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
2827 "Reference transpose: input and output types are mismatched.");
References armnn::BFloat16, armnn::CheckSupportRule(), armnn::Float16, armnn::Float32, armnn::IgnoreUnused(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS16.
Referenced by RefLayerSupport::IsLayerSupported().
◆ IsUnidirectionalSequenceLstmSupported()
Definition at line 2832 of file RefLayerSupport.cpp.
2849 bool supported =
true;
2851 std::array<DataType, 2> supportedTypes =
2857 std::array<DataType, 2> supportedWeightTypes =
2863 std::array<DataType, 3> supportedBiasTypes =
2871 supported &=
CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
2872 "Reference UnidirectionalSequenceLstm: input is not a supported type.");
2873 supported &=
CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
2874 "Reference UnidirectionalSequenceLstm: output is not a supported type.");
2877 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToForgetWeights(), supportedWeightTypes),
2878 reasonIfUnsupported,
2879 "Reference UnidirectionalSequenceLstm: InputToForgetWeights "
2880 "is not a supported type.");
2881 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToCellWeights(), supportedWeightTypes),
2882 reasonIfUnsupported,
2883 "Reference UnidirectionalSequenceLstm: InputToCellWeights is not a supported type.");
2884 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToOutputWeights(), supportedWeightTypes),
2885 reasonIfUnsupported,
2886 "Reference UnidirectionalSequenceLstm: InputToOutputWeights "
2887 "is not a supported type.");
2888 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToForgetWeights(), supportedWeightTypes),
2889 reasonIfUnsupported,
2890 "Reference UnidirectionalSequenceLstm: RecurrentToForgetWeights "
2891 "is not a supported type.");
2892 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToCellWeights(), supportedWeightTypes),
2893 reasonIfUnsupported,
2894 "Reference UnidirectionalSequenceLstm: RecurrentToCellWeights "
2895 "is not a supported type.");
2896 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToOutputWeights(), supportedWeightTypes),
2897 reasonIfUnsupported,
2898 "Reference UnidirectionalSequenceLstm: RecurrentToOutputWeights "
2899 "is not a supported type.");
2901 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetGateBias(), supportedBiasTypes), reasonIfUnsupported,
2902 "Reference UnidirectionalSequenceLstm: ForgetGateBias is not a supported type.");
2903 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetCellBias(), supportedBiasTypes), reasonIfUnsupported,
2904 "Reference UnidirectionalSequenceLstm: CellBias is not a supported type.");
2905 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2906 "Reference UnidirectionalSequenceLstm: OutputGateBias is not a supported type.");
2907 if (!descriptor.m_CifgEnabled)
2909 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetInputToInputWeights(), supportedWeightTypes),
2910 reasonIfUnsupported,
2911 "Reference UnidirectionalSequenceLstm: InputToInputWeights "
2912 "is not a supported type.");
2913 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetRecurrentToInputWeights(), supportedWeightTypes),
2914 reasonIfUnsupported,
2915 "Reference UnidirectionalSequenceLstm: RecurrentToInputWeights "
2916 "is not a supported type.");
2917 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetInputGateBias(), supportedBiasTypes), reasonIfUnsupported,
2918 "Reference UnidirectionalSequenceLstm: InputGateBias is not a supported type.");
2919 if (descriptor.m_PeepholeEnabled)
2921 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToInputWeights(), supportedWeightTypes),
2922 reasonIfUnsupported,
2923 "Reference UnidirectionalSequenceLstm: CellToInputWeights "
2924 "is not a supported type.");
2927 if (descriptor.m_PeepholeEnabled)
2929 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToForgetWeights(), supportedWeightTypes),
2930 reasonIfUnsupported,
2931 "Reference UnidirectionalSequenceLstm: CellToForgetWeights "
2932 "is not a supported type.");
2933 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetCellToOutputWeights(), supportedWeightTypes),
2934 reasonIfUnsupported,
2935 "Reference UnidirectionalSequenceLstm: CellToOutputWeights "
2936 "is not a supported type.");
2938 if (descriptor.m_ProjectionEnabled)
2940 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetProjectionWeights(), supportedWeightTypes),
2941 reasonIfUnsupported,
2942 "Reference UnidirectionalSequenceLstm: ProjectionWeights "
2943 "is not a supported type.");
2944 if (paramsInfo.m_ProjectionBias !=
nullptr)
2946 supported &=
CheckSupportRule(TypesAreEqual(input, paramsInfo.GetProjectionBias()), reasonIfUnsupported,
2947 "Reference UnidirectionalSequenceLstm: input and ProjectionBias types "
2948 "are mismatched");
2951 if (descriptor.m_LayerNormEnabled)
2953 if (!descriptor.m_CifgEnabled)
2955 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetInputLayerNormWeights(), supportedWeightTypes),
2956 reasonIfUnsupported,
2957 "Reference UnidirectionalSequenceLstm: InputLayerNormWeights "
2958 "is not a supported type.");
2960 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetForgetLayerNormWeights(), supportedWeightTypes),
2961 reasonIfUnsupported,
2962 "Reference UnidirectionalSequenceLstm: ForgetLayerNormWeights "
2963 "is not a supported type.");
2964 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetCellLayerNormWeights(), supportedWeightTypes),
2965 reasonIfUnsupported,
2966 "Reference UnidirectionalSequenceLstm: CellLayerNormWeights "
2967 "is not a supported type.");
2968 supported &=
CheckSupportRule(TypeAnyOf(paramsInfo.GetOutputLayerNormWeights(), supportedWeightTypes),
2969 reasonIfUnsupported,
2970 "Reference UnidirectionalSequenceLstm: OutputLayerNormWeights "
2971 "is not a supported type.");
References armnn::CheckSupportRule(), armnn::Float32, LstmInputParamsInfo::GetCellBias(), LstmInputParamsInfo::GetCellLayerNormWeights(), LstmInputParamsInfo::GetCellToForgetWeights(), LstmInputParamsInfo::GetCellToInputWeights(), LstmInputParamsInfo::GetCellToOutputWeights(), LstmInputParamsInfo::GetForgetGateBias(), LstmInputParamsInfo::GetForgetLayerNormWeights(), LstmInputParamsInfo::GetInputGateBias(), LstmInputParamsInfo::GetInputLayerNormWeights(), LstmInputParamsInfo::GetInputToCellWeights(), LstmInputParamsInfo::GetInputToForgetWeights(), LstmInputParamsInfo::GetInputToInputWeights(), LstmInputParamsInfo::GetInputToOutputWeights(), LstmInputParamsInfo::GetOutputGateBias(), LstmInputParamsInfo::GetOutputLayerNormWeights(), LstmInputParamsInfo::GetProjectionBias(), LstmInputParamsInfo::GetProjectionWeights(), LstmInputParamsInfo::GetRecurrentToCellWeights(), LstmInputParamsInfo::GetRecurrentToForgetWeights(), LstmInputParamsInfo::GetRecurrentToInputWeights(), LstmInputParamsInfo::GetRecurrentToOutputWeights(), armnn::IgnoreUnused(), LstmDescriptor::m_CifgEnabled, LstmDescriptor::m_LayerNormEnabled, LstmDescriptor::m_PeepholeEnabled, LstmInputParamsInfo::m_ProjectionBias, LstmDescriptor::m_ProjectionEnabled, armnn::QAsymmS8, and armnn::Signed32.
Referenced by RefLayerSupport::IsLayerSupported().
The documentation for this class was generated from the following files:
bool IsLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
#define ARMNN_ASSERT(COND)
bool IsDepthwiseConvolutionSupported(const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling3dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling3dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReverseV2Supported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQLstmSupported(const TensorInfo &input, const TensorInfo &previousOutputIn, const TensorInfo &previousCellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const QLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
constexpr bool IsQuantized8BitType(DataType dataType)
bool IsTileSupported(const TensorInfo &input, const TensorInfo &output, const TileDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFloorSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDequantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPooling2dSupported(const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDetectionPostProcessSupported(const TensorInfo &boxEncodings, const TensorInfo &scores, const TensorInfo &anchors, const TensorInfo &detectionBoxes, const TensorInfo &detectionClasses, const TensorInfo &detectionScores, const TensorInfo &numDetections, const DetectionPostProcessDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsInputSupported(const TensorInfo &input, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMeanSupported(const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTransposeConvolution2dSupported(const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsRankSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
bool IsSplitterSupported(const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvolution3dSupported(const TensorInfo &input, const TensorInfo &output, const Convolution3dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchMatMulSupported(const TensorInfo &inputX, const TensorInfo &inputY, const TensorInfo &output, const BatchMatMulDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSubtractionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsArgMinMaxSupported(const TensorInfo &input, const TensorInfo &output, const ArgMinMaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPermuteSupported(const TensorInfo &input, const TensorInfo &output, const PermuteDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsComparisonSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const ComparisonDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &mean, const TensorInfo &var, const TensorInfo &beta, const TensorInfo &gamma, const BatchNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToDepthSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsActivationSupported(const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFakeQuantizationSupported(const TensorInfo &input, const FakeQuantizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsQuantizedLstmSupported(const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo ¶msInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsElementwiseUnarySupported(const TensorInfo &input, const TensorInfo &output, const ElementwiseUnaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConcatSupported(const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReduceSupported(const TensorInfo &input, const TensorInfo &output, const ReduceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsResizeSupported(const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDepthToSpaceSupported(const TensorInfo &input, const TensorInfo &output, const DepthToSpaceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsBatchToSpaceNdSupported(const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSpaceToBatchNdSupported(const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMergeSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsL2NormalizationSupported(const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMaximumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemImportSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsReshapeSupported(const TensorInfo &input, const TensorInfo &output, const ReshapeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsCastSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFullyConnectedSupported(const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsOutputSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsDebugSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp16ToFp32Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsLogicalBinarySupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const LogicalBinaryDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsDivisionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPreluSupported(const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsFillSupported(const TensorInfo &input, const TensorInfo &output, const FillDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
void IgnoreUnused(Ts &&...)
bool IsMultiplicationSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSupportedForDataTypeGeneric(Optional< std::string & > reasonIfUnsupported, DataType dataType, Float16Func float16FuncPtr, Float32Func float32FuncPtr, Uint8Func uint8FuncPtr, Int32Func int32FuncPtr, BooleanFunc booleanFuncPtr, Params &&... params)
@ UnidirectionalSequenceLstm
bool IsStridedSliceSupported(const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMemCopySupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConstantSupported(const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output, const ChannelShuffleDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsShapeSupported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsTransposeSupported(const TensorInfo &input, const TensorInfo &output, const TransposeDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsConvertFp32ToFp16Supported(const TensorInfo &input, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsGatherSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, const GatherDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsStackSupported(const std::vector< const TensorInfo * > &inputs, const TensorInfo &output, const StackDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool CheckSupportRule(F rule, Optional< std::string & > reasonIfUnsupported, const char *reason)
bool IsLogSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const LogSoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported) const
bool IsSoftmaxSupported(const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsSliceSupported(const TensorInfo &input, const TensorInfo &output, const SliceDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsPadSupported(const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsGatherNdSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsUnidirectionalSequenceLstmSupported(const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const UnidirectionalSequenceLstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsInstanceNormalizationSupported(const TensorInfo &input, const TensorInfo &output, const InstanceNormalizationDescriptor &descriptor, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const
bool IsMinimumSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string & > reasonIfUnsupported=EmptyOptional()) const