author     Francis Murtagh <francis.murtagh@arm.com>  2021-01-28 14:25:15 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>  2021-01-28 14:28:51 +0000
commit     7909c53e3b91f96a12f2d587741575d4d1becdce (patch)
tree       4b79f3b6256fd98d0521fb60f70a0a7d4c095cd6
parent     bbc876c0b7046d6cc3aa9a8d64f80a755027d0cf (diff)
download   armnn-7909c53e3b91f96a12f2d587741575d4d1becdce.tar.gz
IVGCVSW-4874 Provide LayerSupportHandle to frontend users
* Add test for new IsBackendRegistered member function of Handle
* Move deprecated messages to new frontend API of LayerSupportHandle
* Update delegate to use dot operator for IsXXXLayerSupported

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I70d7166e207a10e4b3583a827ca0dda2169bcba1
!android-nn-driver:4940
-rw-r--r--  delegate/src/DelegateUtils.hpp     4
-rw-r--r--  include/armnn/BackendHelper.hpp  412
-rw-r--r--  include/armnn/ILayerSupport.hpp    8
-rw-r--r--  src/armnn/BackendHelper.cpp      692
-rw-r--r--  src/armnn/test/UtilsTests.cpp     14
5 files changed, 1115 insertions(+), 15 deletions(-)
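At a glance, the new frontend flow looks like the following — a minimal sketch, assuming the CpuRef backend is registered and using hypothetical 2x2 float32 tensors:

    #include <armnn/BackendHelper.hpp>
    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        // Ask the registry for a handle; no raw ILayerSupport pointer is exposed any more.
        armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId("CpuRef");
        if (!handle.IsBackendRegistered())
        {
            std::cerr << "CpuRef backend is not registered\n";
            return 1;
        }

        // Hypothetical tensor infos for an element-wise addition.
        armnn::TensorInfo input0({2, 2}, armnn::DataType::Float32);
        armnn::TensorInfo input1({2, 2}, armnn::DataType::Float32);
        armnn::TensorInfo output({2, 2}, armnn::DataType::Float32);

        std::string reason;
        bool supported = handle.IsAdditionSupported(input0, input1, output,
                                                    armnn::Optional<std::string&>(reason));
        std::cout << "Addition supported: " << supported << " " << reason << "\n";
        return 0;
    }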
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 58eeb9ab63..17261e4d40 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -29,11 +29,11 @@ try \
for (auto&& backendId : backends) \
{ \
auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
- if (layerSupportObject) \
+ if (layerSupportObject.IsBackendRegistered()) \
{ \
std::string reasonIfUnsupported; \
supported = \
- layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
+ layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
if (supported) \
{ \
break; \
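Expanded outside the macro, the delegate's per-backend check after this change reads roughly as below — a sketch only, with IsAdditionSupported standing in for the forwarded func argument and the tensor infos assumed in scope:

    bool supported = false;
    for (auto&& backendId : backends)
    {
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId);
        if (layerSupportObject.IsBackendRegistered())  // value check replaces the old pointer test
        {
            std::string reasonIfUnsupported;
            // Dot operator: layerSupportObject is now a handle by value, not a shared_ptr.
            supported = layerSupportObject.IsAdditionSupported(
                input0, input1, output, armnn::Optional<std::string&>(reasonIfUnsupported));
            if (supported)
            {
                break;
            }
        }
    }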
diff --git a/include/armnn/BackendHelper.hpp b/include/armnn/BackendHelper.hpp
index 6a6c8b9c15..3d0632da5e 100644
--- a/include/armnn/BackendHelper.hpp
+++ b/include/armnn/BackendHelper.hpp
@@ -11,7 +11,415 @@
namespace armnn
{
-/// Convenience function to retrieve the ILayerSupport for a backend
-std::shared_ptr<ILayerSupport> GetILayerSupportByBackendId(const armnn::BackendId& backend);
+// This handle exposes non-virtual IsXXXLayerSupported() functions that forward to the polymorphic
+// ILayerSupport::IsXXXLayerSupported() at the framework level, so there is no risk of VTable misalignment.
+// The extra layer of indirection keeps the abstract ILayerSupport purely a backend interface, while
+// this frontend class stays ABI stable by being free of virtual functions.
+class LayerSupportHandle
+{
+public:
+ explicit LayerSupportHandle(std::shared_ptr<ILayerSupport> layerSupport)
+ : m_LayerSupport(std::move(layerSupport)) {}
+
+ bool IsBackendRegistered() const;
+
+ ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
+ bool IsAbsSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsArgMinMaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ArgMinMaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsComparisonSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const ComparisonDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDebugSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDepthToSpaceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthToSpaceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDepthwiseConvolutionSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDequantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
+ const TensorInfo& scores,
+ const TensorInfo& anchors,
+ const TensorInfo& detectionBoxes,
+ const TensorInfo& detectionClasses,
+ const TensorInfo& detectionScores,
+ const TensorInfo& numDetections,
+ const DetectionPostProcessDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDilatedDepthwiseConvolutionSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsElementwiseUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
+ bool IsEqualSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFillSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const FillDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
+ bool IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const GatherDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
+ bool IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsInstanceNormalizationSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const InstanceNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLogicalUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLogSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const LogSoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const LstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMaximumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMemImportSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMergeSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
+ bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPreCompiledSupported(const TensorInfo& input,
+ const PreCompiledDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsPreluSupported(const TensorInfo& input,
+ const TensorInfo& alpha,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsQuantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsQLstmSupported(const TensorInfo& input,
+ const TensorInfo& previousOutputIn,
+ const TensorInfo& previousCellStateIn,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const QLstmDescriptor& descriptor,
+ const LstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsQuantizedLstmSupported(const TensorInfo& input,
+ const TensorInfo& previousCellStateIn,
+ const TensorInfo& previousOutputIn,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const QuantizedLstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsRankSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsReshapeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ReshapeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
+ bool IsResizeBilinearSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
+ bool IsRsqrtSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSpaceToBatchNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToBatchNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSpaceToDepthSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToDepthDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
+ bool IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsStridedSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsSwitchSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output0,
+ const TensorInfo& output1,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsTransposeConvolution2dSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+ bool IsTransposeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+private:
+ std::shared_ptr<ILayerSupport> m_LayerSupport;
+};
+
+/// Convenience function to retrieve a LayerSupportHandle for a backend
+LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend);
}
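The null-object behaviour of the handle can be probed directly; a small sketch (the backend id "MadeUpBackend" is hypothetical):

    #include <armnn/BackendHelper.hpp>
    #include <cassert>

    void CheckUnregisteredBackend()
    {
        // An unknown id yields a handle wrapping a null ILayerSupport, not a null pointer,
        // so registration is probed through the handle itself.
        armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId("MadeUpBackend");
        assert(!handle.IsBackendRegistered());
        // An IsXXXSupported() query on such a handle would dereference the null ILayerSupport,
        // so always check IsBackendRegistered() first.
    }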
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 200409361c..5bcfc7d3cd 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -27,7 +27,6 @@ protected:
virtual ~ILayerSupport() {}
public:
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
virtual bool IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -147,7 +146,6 @@ public:
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
virtual bool IsEqualSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -173,7 +171,6 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsGatherSupported with descriptor instead")
virtual bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -185,7 +182,6 @@ public:
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
virtual bool IsGreaterSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
@@ -255,7 +251,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsConcatSupported instead")
virtual bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
@@ -334,7 +329,6 @@ public:
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsResizeSupported instead")
virtual bool IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -344,7 +338,6 @@ public:
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
virtual bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
@@ -369,7 +362,6 @@ public:
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
- ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
virtual bool IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
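With the deprecation markers now carried by LayerSupportHandle rather than this backend interface, callers migrate to the replacement queries; a sketch for the Abs case (the helper name is illustrative only):

    #include <armnn/BackendHelper.hpp>
    #include <armnn/Descriptors.hpp>

    bool IsAbsSupportedOnBackend(const armnn::BackendId& backend,
                                 const armnn::TensorInfo& input,
                                 const armnn::TensorInfo& output,
                                 std::string& reason)
    {
        auto handle = armnn::GetILayerSupportByBackendId(backend);
        if (!handle.IsBackendRegistered())
        {
            return false;
        }
        // Preferred form: Abs is one operation of the unary element-wise family.
        armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);
        return handle.IsElementwiseUnarySupported(input, output, descriptor,
                                                  armnn::Optional<std::string&>(reason));
    }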
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 84889b80e4..fb74877049 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -11,18 +11,704 @@
namespace armnn
{
-std::shared_ptr<ILayerSupport> GetILayerSupportByBackendId(const armnn::BackendId& backend)
+// Returns a LayerSupportHandle instead of the previous shared pointer to ILayerSupport.
+LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
{
BackendRegistry& backendRegistry = armnn::BackendRegistryInstance();
if (!backendRegistry.IsBackendRegistered(backend))
{
- return nullptr;
+ return LayerSupportHandle(nullptr);
}
auto factoryFunc = backendRegistry.GetFactory(backend);
auto backendObject = factoryFunc();
- return backendObject->GetLayerSupport();
+ return LayerSupportHandle(backendObject->GetLayerSupport());
}
+bool LayerSupportHandle::IsBackendRegistered() const
+{
+ return m_LayerSupport != nullptr;
+}
+
+bool LayerSupportHandle::IsAbsSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ // Forward to the IsXXXLayerSupported() function of the specific backend.
+ return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsActivationSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsAdditionSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ArgMinMaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsArgMinMaxSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsBatchNormalizationSupported(input,
+ output,
+ mean,
+ var,
+ beta,
+ gamma,
+ descriptor,
+ reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const BatchToSpaceNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsBatchToSpaceNdSupported(input,
+ output,
+ descriptor,
+ reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const ComparisonDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConstantSupported(output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConvertBf16ToFp32Supported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConvertFp32ToBf16Supported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConvertFp16ToFp32Supported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConvertFp32ToFp16Supported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsConvolution2dSupported(input,
+ output,
+ descriptor,
+ weights,
+ biases,
+ reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsDebugSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthToSpaceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsDepthToSpaceSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsDepthwiseConvolutionSupported(input,
+ output,
+ descriptor,
+ weights,
+ biases,
+ reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsDequantizeSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
+ const TensorInfo& scores,
+ const TensorInfo& anchors,
+ const TensorInfo& detectionBoxes,
+ const TensorInfo& detectionClasses,
+ const TensorInfo& detectionScores,
+ const TensorInfo& numDetections,
+ const DetectionPostProcessDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsDetectionPostProcessSupported(boxEncodings,
+ scores,
+ anchors,
+ detectionBoxes,
+ detectionClasses,
+ detectionScores,
+ numDetections,
+ descriptor,
+ reasonIfUnsupported);
+}
+
+bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsDilatedDepthwiseConvolutionSupported(input,
+ output,
+ descriptor,
+ weights,
+ biases,
+ reasonIfUnsupported);
+}
+
+bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsDivisionSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsEqualSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsFakeQuantizationSupported(input, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const FillDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsFillSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsFloorSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsFullyConnectedSupported(input,
+ output,
+ weights,
+ biases,
+ descriptor,
+ reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const GatherDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsGreaterSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsInputSupported(input, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsInstanceNormalizationSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const InstanceNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsInstanceNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsL2NormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsLogicalBinarySupported(input0,
+ input1,
+ output,
+ descriptor,
+ reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsLogicalUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const LogSoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsLogSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}
+
+bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const LstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsLstmSupported(input,
+ outputStateIn,
+ cellStateIn,
+ scratchBuffer,
+ outputStateOut,
+ cellStateOut,
+ output,
+ descriptor,
+ paramsInfo,
+ reasonIfUnsupported);
+}
+
+bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMaximumSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMeanSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMemCopySupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMemImportSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMinimumSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsMultiplicationSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsOutputSupported(output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsPadSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsPermuteSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
+ const PreCompiledDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsPreCompiledSupported(input, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
+ const TensorInfo& alpha,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsPreluSupported(input, alpha, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsQuantizeSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
+ const TensorInfo& previousOutputIn,
+ const TensorInfo& previousCellStateIn,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const QLstmDescriptor& descriptor,
+ const LstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsQLstmSupported(input,
+ previousOutputIn,
+ previousCellStateIn,
+ outputStateOut,
+ cellStateOut,
+ output,
+ descriptor,
+ paramsInfo,
+ reasonIfUnsupported);
+}
+
+bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
+ const TensorInfo& previousCellStateIn,
+ const TensorInfo& previousOutputIn,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const QuantizedLstmInputParamsInfo& paramsInfo,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsQuantizedLstmSupported(input,
+ previousCellStateIn,
+ previousOutputIn,
+ cellStateOut,
+ output,
+ paramsInfo,
+ reasonIfUnsupported);
+}
+
+bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsRankSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ReshapeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsResizeBilinearSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsRsqrtSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToBatchNdDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSpaceToBatchNdSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SpaceToDepthDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSpaceToDepthSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSplitterSupported(input, outputs, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsStackSupported(inputs, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsStandInSupported(inputs, outputs, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsStridedSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSubtractionSupported(input0, input1, output, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output0,
+ const TensorInfo& output1,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsSwitchSupported(input0, input1, output0, output1, reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsTransposeConvolution2dSupported(
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsTransposeConvolution2dSupported(input,
+ output,
+ descriptor,
+ weights,
+ biases,
+ reasonIfUnsupported.value());
+}
+
+bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TransposeDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsTransposeSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
+}
\ No newline at end of file
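Note that most of the forwarding bodies above unwrap the optional with reasonIfUnsupported.value(), so a caller who wants the diagnostic should bind a concrete string before the call — a sketch, assuming CpuRef and a hypothetical 1x4 float32 tensor:

    std::string reason;
    armnn::Optional<std::string&> reasonRef(reason);  // binds 'reason' so value() has something to unwrap

    auto handle = armnn::GetILayerSupportByBackendId("CpuRef");
    armnn::TensorInfo info({1, 4}, armnn::DataType::Float32);
    if (handle.IsBackendRegistered() && !handle.IsFloorSupported(info, info, reasonRef))
    {
        // 'reason' now holds whatever explanation the backend supplied.
    }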
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index fb078de32f..7776a2d3cf 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -5,6 +5,7 @@
#include <boost/test/unit_test.hpp>
+#include <armnn/BackendHelper.hpp>
#include <armnn/Utils.hpp>
#include <armnn/Types.hpp>
#include <armnn/TypesUtils.hpp>
@@ -266,4 +267,17 @@ BOOST_AUTO_TEST_CASE(PermuteQuantizationDim)
BOOST_CHECK(permuted.GetQuantizationDim().value() == 1U);
}
+#if defined(ARMNNREF_ENABLED)
+BOOST_AUTO_TEST_CASE(LayerSupportHandle)
+{
+ auto layerSupportObject = armnn::GetILayerSupportByBackendId("CpuRef");
+ armnn::TensorInfo input;
+ std::string reasonIfUnsupported;
+ // InputLayer always supported for CpuRef
+ BOOST_CHECK_EQUAL(layerSupportObject.IsInputSupported(input, reasonIfUnsupported), true);
+
+ BOOST_CHECK(layerSupportObject.IsBackendRegistered());
+}
+#endif
+
BOOST_AUTO_TEST_SUITE_END()