aboutsummaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2021-01-28 14:25:15 +0000
committerFrancis Murtagh <francis.murtagh@arm.com>2021-01-28 14:28:51 +0000
commit7909c53e3b91f96a12f2d587741575d4d1becdce (patch)
tree4b79f3b6256fd98d0521fb60f70a0a7d4c095cd6 /include
parentbbc876c0b7046d6cc3aa9a8d64f80a755027d0cf (diff)
downloadarmnn-7909c53e3b91f96a12f2d587741575d4d1becdce.tar.gz
IVGCVSW-4874 Provide LayerSupportHandle to frontend users
* Add test for new IsBackendRegistered member function of Handle
* Move deprecated messages to new frontend API of LayerSupportHandle
* Update delegate to use dot operator for IsXXXLayerSupported

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I70d7166e207a10e4b3583a827ca0dda2169bcba1
!android-nn-driver:4940
Diffstat (limited to 'include')
-rw-r--r--include/armnn/BackendHelper.hpp412
-rw-r--r--include/armnn/ILayerSupport.hpp8
2 files changed, 410 insertions, 10 deletions
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 6a6c8b9c15..3d0632da5e 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -11,7 +11,415 @@
namespace armnn
{
-/// Convenience function to retrieve the ILayerSupport for a backend
-std::shared_ptr<ILayerSupport> GetILayerSupportByBackendId(const armnn::BackendId& backend);
+// This handle calls its own IsXXXLayerSupported() functions which then call the polymorphic
+// ILayerSupport::IsXXXLayerSupported() at the framework level so there is no risk of VTable misalignment.
+// This is to make ILayerSupport in its abstract form a solely Backend interface alongside a
+// separate ABI stable frontend class free of virtual functions via an added layer of indirection.
+class LayerSupportHandle
+{
+public:
+ explicit LayerSupportHandle(std::shared_ptr<ILayerSupport> layerSupport)
+ : m_LayerSupport(std::move(layerSupport)) {};
+
+ bool IsBackendRegistered() const;
+
+ ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
+ bool IsAbsSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsArgMinMaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ArgMinMaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsComparisonSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const ComparisonDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDebugSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDepthToSpaceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthToSpaceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDepthwiseConvolutionSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDequantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
+ const TensorInfo& scores,
+ const TensorInfo& anchors,
+ const TensorInfo& detectionBoxes,
+ const TensorInfo& detectionClasses,
+ const TensorInfo& detectionScores,
+ const TensorInfo& numDetections,
+ const DetectionPostProcessDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDilatedDepthwiseConvolutionSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsElementwiseUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
+ bool IsEqualSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFillSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const FillDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
+ bool IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const GatherDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
+ bool IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+                            const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsInstanceNormalizationSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const InstanceNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLogicalUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLogSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const LogSoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const LstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMaximumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMemImportSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMergeSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
+ bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPreCompiledSupported(const TensorInfo& input,
+ const PreCompiledDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPreluSupported(const TensorInfo& input,
+ const TensorInfo& alpha,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsQuantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsQLstmSupported(const TensorInfo& input,
+ const TensorInfo& previousOutputIn,
+ const TensorInfo& previousCellStateIn,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const QLstmDescriptor& descriptor,
+ const LstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsQuantizedLstmSupported(const TensorInfo& input,
+ const TensorInfo& previousCellStateIn,
+ const TensorInfo& previousOutputIn,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const QuantizedLstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsRankSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsReshapeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ReshapeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
+ bool IsResizeBilinearSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
+ bool IsRsqrtSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSpaceToBatchNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToBatchNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSpaceToDepthSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToDepthDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
+ bool IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+
+ bool IsStridedSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSwitchSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output0,
+ const TensorInfo& output1,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsTransposeConvolution2dSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsTransposeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+private:
+ std::shared_ptr<ILayerSupport> m_LayerSupport;
+};
+
+/// Convenience function to retrieve the LayerSupportHandle for a backend
+LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend);
}
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 200409361c..5bcfc7d3cd 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -27,7 +27,6 @@ protected:
virtual ~ILayerSupport() {}
public:
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
virtual bool IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -147,7 +146,6 @@ public:
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
virtual bool IsEqualSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -173,7 +171,6 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
virtual bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -185,7 +182,6 @@ public:
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
virtual bool IsGreaterSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& ouput,
@@ -255,7 +251,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
@@ -334,7 +329,6 @@ public:
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
virtual bool IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -344,7 +338,6 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
virtual bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -369,7 +362,6 @@ public:
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
virtual bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;