//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <backendsCommon/LayerSupportBase.hpp>

namespace armnn
{

class NeonLayerSupport : public LayerSupportBase
{
public:
    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
    bool IsAbsSupported(const TensorInfo& input,
                        const TensorInfo& output,
                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsActivationSupported(const TensorInfo& input,
                               const TensorInfo& output,
                               const ActivationDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsAdditionSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsArgMinMaxSupported(const TensorInfo& input,
                              const TensorInfo& output,
                              const ArgMinMaxDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsBatchNormalizationSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const TensorInfo& mean,
                                       const TensorInfo& var,
                                       const TensorInfo& beta,
                                       const TensorInfo& gamma,
                                       const BatchNormalizationDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsBatchToSpaceNdSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const BatchToSpaceNdDescriptor& descriptor,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsComparisonSupported(const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               const ComparisonDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                           const TensorInfo& output,
                           const ConcatDescriptor& descriptor,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsConstantSupported(const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsConvolution2dSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const Convolution2dDescriptor& descriptor,
                                  const TensorInfo& weights,
                                  const Optional<TensorInfo>& biases,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsDepthToSpaceSupported(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const DepthToSpaceDescriptor& descriptor,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const DepthwiseConvolution2dDescriptor& descriptor,
                                         const TensorInfo& weights,
                                         const Optional<TensorInfo>& biases,
                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsDequantizeSupported(const TensorInfo& input,
                               const TensorInfo& output,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
                                         const TensorInfo& scores,
                                         const TensorInfo& anchors,
                                         const TensorInfo& detectionBoxes,
                                         const TensorInfo& detectionClasses,
                                         const TensorInfo& detectionScores,
                                         const TensorInfo& numDetections,
                                         const DetectionPostProcessDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const DepthwiseConvolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reason = EmptyOptional()) const override;

    bool IsElementwiseUnarySupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const ElementwiseUnaryDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsFloorSupported(const TensorInfo& input,
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsFullyConnectedSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& weights,
                                   const TensorInfo& biases,
                                   const FullyConnectedDescriptor& descriptor,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
    bool IsGreaterSupported(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsInputSupported(const TensorInfo& input,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsInstanceNormalizationSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const InstanceNormalizationDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsL2NormalizationSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const L2NormalizationDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsLstmSupported(const TensorInfo& input,
                         const TensorInfo& outputStateIn,
                         const TensorInfo& cellStateIn,
                         const TensorInfo& scratchBuffer,
                         const TensorInfo& outputStateOut,
                         const TensorInfo& cellStateOut,
                         const TensorInfo& output,
                         const LstmDescriptor& descriptor,
                         const LstmInputParamsInfo& paramsInfo,
                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsMaximumSupported(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsMeanSupported(const TensorInfo& input,
                         const TensorInfo& output,
                         const MeanDescriptor& descriptor,
                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                           const TensorInfo& output,
                           const MergerDescriptor& descriptor,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsMinimumSupported(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsMultiplicationSupported(const TensorInfo& input0,
                                   const TensorInfo& input1,
                                   const TensorInfo& output,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsDivisionSupported(const TensorInfo& input0,
                             const TensorInfo& input1,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsNormalizationSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const NormalizationDescriptor& descriptor,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsOutputSupported(const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsPadSupported(const TensorInfo& input,
                        const TensorInfo& output,
                        const PadDescriptor& descriptor,
                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsPermuteSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const PermuteDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsPooling2dSupported(const TensorInfo& input,
                              const TensorInfo& output,
                              const Pooling2dDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsPreluSupported(const TensorInfo& input,
                          const TensorInfo& alpha,
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsQuantizeSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsQuantizedLstmSupported(const TensorInfo& input,
                                  const TensorInfo& cellStateIn,
                                  const TensorInfo& outputStateIn,
                                  const TensorInfo& cellStateOut,
                                  const TensorInfo& outputStateOut,
                                  const QuantizedLstmInputParamsInfo& paramsInfo,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsReshapeSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const ReshapeDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsResizeSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           const ResizeDescriptor& descriptor,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
    bool IsResizeBilinearSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
    bool IsRsqrtSupported(const TensorInfo& input,
                          const TensorInfo& output,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsSliceSupported(const TensorInfo& input,
                          const TensorInfo& output,
                          const SliceDescriptor& descriptor,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsSoftmaxSupported(const TensorInfo& input,
                            const TensorInfo& output,
                            const SoftmaxDescriptor& descriptor,
                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsSpaceToBatchNdSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const SpaceToBatchNdDescriptor& descriptor,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsSpaceToDepthSupported(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const SpaceToDepthDescriptor& descriptor,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
    bool IsSplitterSupported(const TensorInfo& input,
                             const ViewsDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsSplitterSupported(const TensorInfo& input,
                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                             const ViewsDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                          const TensorInfo& output,
                          const StackDescriptor& descriptor,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsStridedSliceSupported(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const StridedSliceDescriptor& descriptor,
                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsSubtractionSupported(const TensorInfo& input0,
                                const TensorInfo& input1,
                                const TensorInfo& output,
                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

    bool IsTransposeConvolution2dSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const TransposeConvolution2dDescriptor& descriptor,
                                           const TensorInfo& weights,
                                           const Optional<TensorInfo>& biases,
                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;

}; // class NeonLayerSupport

} // namespace armnn
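//
// A minimal usage sketch (illustrative only, not part of this header): querying the
// Neon backend for Float32 ReLU activation support and retrieving the failure reason.
// The include path, tensor shape, and function body below are assumptions made for
// the example, not taken from this file.
//
//     #include <neon/NeonLayerSupport.hpp>  // assumed include path
//     #include <string>
//
//     void CheckNeonActivationSupport()
//     {
//         armnn::NeonLayerSupport layerSupport;
//
//         // Hypothetical 1x16 Float32 tensor used for both input and output.
//         armnn::TensorInfo tensorInfo(armnn::TensorShape({1, 16}), armnn::DataType::Float32);
//
//         armnn::ActivationDescriptor descriptor;
//         descriptor.m_Function = armnn::ActivationFunction::ReLu;
//
//         std::string reason;
//         bool supported = layerSupport.IsActivationSupported(tensorInfo,
//                                                             tensorInfo,
//                                                             descriptor,
//                                                             armnn::Optional<std::string&>(reason));
//         // When 'supported' is false, 'reason' describes why the Neon backend
//         // cannot execute this activation configuration.
//     }
//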