aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCathal Corbett <cathal.corbett@arm.com>2021-12-24 12:24:40 +0000
committerCathal Corbett <cathal.corbett@arm.com>2022-01-19 12:58:56 +0000
commit34b429c2215bab7fd12b761dd5c200414c1b4a5b (patch)
tree7518ec40c8e56dbf229421d47d9527ed7aadd33e
parent479e230479c10dcf9b9a79a3e80f0847d6ae5293 (diff)
downloadarmnn-34b429c2215bab7fd12b761dd5c200414c1b4a5b.tar.gz
IVGCVSW-6629 Stabilize the ILayerSupport interface with unified strategy.
* New Virtual Function Added. * Implemented in Ref Neon CL with switch statement for all layers. * Deprecate original IsXXXLayerSupported functions. * Ensure Npu not broken with change. Change-Id: Icf61b16beec83d6af1cb287e24ab1e98a6138c8c Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
-rw-r--r--Android.mk1
-rw-r--r--CMakeLists.txt1
-rw-r--r--include/armnn/Descriptors.hpp5
-rw-r--r--include/armnn/backends/ILayerSupport.hpp83
-rw-r--r--src/armnn/BackendHelper.cpp764
-rw-r--r--src/armnn/ILayerSupport.cpp586
-rw-r--r--src/armnn/LayerSupport.cpp495
-rw-r--r--src/armnn/test/OptimizerTests.cpp23
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.cpp47
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.hpp71
-rw-r--r--src/backends/backendsCommon/test/DynamicBackendTests.hpp20
-rw-r--r--src/backends/backendsCommon/test/MockBackend.hpp49
-rw-r--r--src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp24
-rw-r--r--src/backends/cl/ClLayerSupport.cpp412
-rw-r--r--src/backends/cl/ClLayerSupport.hpp9
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp421
-rw-r--r--src/backends/neon/NeonLayerSupport.hpp9
-rw-r--r--src/backends/reference/RefLayerSupport.cpp486
-rw-r--r--src/backends/reference/RefLayerSupport.hpp9
19 files changed, 3241 insertions, 274 deletions
diff --git a/Android.mk b/Android.mk
index df8cc8a670..fde1ffa898 100644
--- a/Android.mk
+++ b/Android.mk
@@ -122,6 +122,7 @@ LOCAL_SRC_FILES := \
src/armnn/Descriptors.cpp \
src/armnn/Exceptions.cpp \
src/armnn/Graph.cpp \
+ src/armnn/ILayerSupport.cpp \
src/armnn/InternalTypes.cpp \
src/armnn/JsonPrinter.cpp \
src/armnn/Layer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 551ccbf581..629a79880b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -347,6 +347,7 @@ list(APPEND armnn_sources
src/armnn/Graph.cpp
src/armnn/Graph.hpp
src/armnn/IGraphObservable.hpp
+ src/armnn/ILayerSupport.cpp
src/armnn/Instrument.hpp
src/armnn/InternalTypes.cpp
src/armnn/InternalTypes.hpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 342d952277..b37db540da 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -19,7 +19,10 @@ namespace armnn
{
/// Base class for all descriptors.
-struct BaseDescriptor {};
+struct BaseDescriptor
+{
+ virtual ~BaseDescriptor() = default;
+};
/// An ActivationDescriptor for the ActivationLayer.
struct ActivationDescriptor : BaseDescriptor
diff --git a/include/armnn/backends/ILayerSupport.hpp b/include/armnn/backends/ILayerSupport.hpp
index 519a006416..29d08f539e 100644
--- a/include/armnn/backends/ILayerSupport.hpp
+++ b/include/armnn/backends/ILayerSupport.hpp
@@ -5,7 +5,7 @@
#pragma once
#include <armnn/Deprecated.hpp>
-#include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/Optional.hpp>
#include <armnn/QuantizedLstmParams.hpp>
@@ -27,21 +27,33 @@ protected:
virtual ~ILayerSupport() {}
public:
+ virtual bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo = EmptyOptional(),
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo =
+ EmptyOptional(),
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsArgMinMaxSupported(const TensorInfo& input,
const TensorInfo& output,
const ArgMinMaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,
@@ -51,50 +63,61 @@ public:
const BatchNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsBatchToSpaceNdSupported(const TensorInfo& input,
const TensorInfo& output,
const BatchToSpaceNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsCastSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsChannelShuffleSupported(const TensorInfo& input,
const TensorInfo& output,
const ChannelShuffleDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const ComparisonDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConvolution2dSupported(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
@@ -102,6 +125,7 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsConvolution3dSupported(const TensorInfo& input,
const TensorInfo& output,
const Convolution3dDescriptor& descriptor,
@@ -109,15 +133,18 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsDepthToSpaceSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthToSpaceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsDepthwiseConvolutionSupported(
const TensorInfo& input,
const TensorInfo& output,
@@ -126,10 +153,12 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsDequantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
const TensorInfo& scores,
const TensorInfo& anchors,
@@ -140,6 +169,7 @@ public:
const DetectionPostProcessDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const =0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsDilatedDepthwiseConvolutionSupported(
const TensorInfo& input,
const TensorInfo& output,
@@ -148,29 +178,35 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsElementwiseUnarySupported(const TensorInfo& input,
const TensorInfo& output,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsFillSupported(const TensorInfo& input,
const TensorInfo& output,
const FillDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsFullyConnectedSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
@@ -178,42 +214,50 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsInstanceNormalizationSupported(
const TensorInfo& input,
const TensorInfo& output,
const InstanceNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsL2NormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsLogicalBinarySupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const LogicalBinaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsLogicalUnarySupported(const TensorInfo& input,
const TensorInfo& output,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsLstmSupported(const TensorInfo& input,
const TensorInfo& outputStateIn,
const TensorInfo& cellStateIn,
@@ -225,80 +269,97 @@ public:
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsMeanSupported(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsMemCopySupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsMemImportSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsMergeSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& ouput,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsPadSupported(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsPermuteSupported(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsPooling2dSupported(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsPooling3dSupported(const TensorInfo& input,
const TensorInfo& output,
const Pooling3dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsPreCompiledSupported(const TensorInfo& input,
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsPreluSupported(const TensorInfo& input,
const TensorInfo& alpha,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsQuantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsQLstmSupported(const TensorInfo& input,
const TensorInfo& previousOutputIn,
const TensorInfo& previousCellStateIn,
@@ -309,6 +370,7 @@ public:
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsQuantizedLstmSupported(const TensorInfo& input,
const TensorInfo& previousCellStateIn,
const TensorInfo& previousOutputIn,
@@ -317,81 +379,96 @@ public:
const QuantizedLstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsRankSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsReduceSupported(const TensorInfo& input,
const TensorInfo& output,
const ReduceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsReshapeSupported(const TensorInfo& input,
const TensorInfo& output,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsSpaceToBatchNdSupported(const TensorInfo& input,
const TensorInfo& output,
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsSpaceToDepthSupported(const TensorInfo& input,
const TensorInfo& output,
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
const std::vector<const TensorInfo*>& outputs,
const StandInDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
-
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsSwitchSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output0,
const TensorInfo& output1,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsTransposeConvolution2dSupported(
const TensorInfo& input,
const TensorInfo& output,
@@ -400,11 +477,13 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsTransposeSupported(const TensorInfo& input,
const TensorInfo& output,
const TransposeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsUnidirectionalSequenceLstmSupported(
const TensorInfo& input,
const TensorInfo& outputStateIn,
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index f561b93c12..23c4003107 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -148,12 +148,21 @@ bool LayerSupportHandle::IsBackendRegistered() const
return false;
}
+using TensorInfos = std::vector<TensorInfo>;
+
bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsActivationSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Activation,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
@@ -161,7 +170,14 @@ bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsAdditionSupported(input0, input1, output, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Addition,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
@@ -169,7 +185,14 @@ bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
const ArgMinMaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsArgMinMaxSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ArgMinMax,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -181,14 +204,14 @@ bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
const BatchNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsBatchNormalizationSupported(input,
- output,
- mean,
- var,
- beta,
- gamma,
- descriptor,
- reasonIfUnsupported.value());
+ TensorInfos infos{input, output, mean, var, beta, gamma};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::BatchNormalization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
@@ -196,27 +219,43 @@ bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
const BatchToSpaceNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsBatchToSpaceNdSupported(input,
- output,
- descriptor,
- reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::BatchToSpaceNd,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsCastSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Cast,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
-bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo &input, const TensorInfo &output,
+bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo &input,
+ const TensorInfo &output,
const ChannelShuffleDescriptor &descriptor,
Optional<std::string &> reasonIfUnsupported)
{
- return m_LayerSupport->IsChannelShuffleSupported(input,
- output,
- descriptor,
- reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ChannelShuffle,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
@@ -225,7 +264,14 @@ bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
const ComparisonDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Comparison,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
@@ -233,41 +279,88 @@ bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*>
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos;
+ for (const TensorInfo* inputInfo : inputs)
+ {
+ infos.push_back(*inputInfo);
+ }
+ infos.push_back(output);
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Concat,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConstantSupported(output, reasonIfUnsupported.value());
+ TensorInfos infos{output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Constant,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConvertBf16ToFp32Supported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ConvertBf16ToFp32,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConvertFp32ToBf16Supported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToBf16,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConvertFp16ToFp32Supported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp16ToFp32,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConvertFp32ToFp16Supported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ConvertFp32ToFp16,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
@@ -277,12 +370,15 @@ bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConvolution2dSupported(input,
- output,
- descriptor,
- weights,
- biases,
- reasonIfUnsupported.value());
+ TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+ TensorInfos infos{input, output, weights, biasesVal};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Convolution2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
@@ -292,19 +388,29 @@ bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsConvolution3dSupported(input,
- output,
- descriptor,
- weights,
- biases,
- reasonIfUnsupported.value());
+ TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+ TensorInfos infos{input, output, weights, biasesVal};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Convolution3d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsDebugSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Debug,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
@@ -312,7 +418,14 @@ bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
const DepthToSpaceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsDepthToSpaceSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::DepthToSpace,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
@@ -323,19 +436,29 @@ bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsDepthwiseConvolutionSupported(input,
- output,
- descriptor,
- weights,
- biases,
- reasonIfUnsupported.value());
+ TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+ TensorInfos infos{input, output, weights, biasesVal};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsDequantizeSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Dequantize,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
@@ -348,15 +471,14 @@ bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEn
const DetectionPostProcessDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsDetectionPostProcessSupported(boxEncodings,
- scores,
- anchors,
- detectionBoxes,
- detectionClasses,
- detectionScores,
- numDetections,
- descriptor,
- reasonIfUnsupported);
+ TensorInfos infos{boxEncodings, scores, anchors, detectionBoxes, detectionClasses, detectionScores, numDetections};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::DetectionPostProcess,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
@@ -367,12 +489,15 @@ bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsDilatedDepthwiseConvolutionSupported(input,
- output,
- descriptor,
- weights,
- biases,
- reasonIfUnsupported);
+ TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+ TensorInfos infos{input, output, weights, biasesVal};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::DepthwiseConvolution2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
@@ -380,7 +505,14 @@ bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsDivisionSupported(input0, input1, output, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Division,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
@@ -388,14 +520,28 @@ bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsFakeQuantizationSupported(input, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::FakeQuantization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
@@ -403,14 +549,28 @@ bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
const FillDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsFillSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Fill,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsFloorSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Floor,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
@@ -462,12 +622,14 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
}
}
- return m_LayerSupport->IsFullyConnectedSupported(input,
- output,
- weights,
- biases,
- descriptor,
- reasonIfUnsupported.value());
+ TensorInfos infos{input, output, weights, biases};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::FullyConnected,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
@@ -476,13 +638,27 @@ bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Gather,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsInputSupported(input, reasonIfUnsupported.value());
+ TensorInfos infos{input};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Input,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsInstanceNormalizationSupported(
@@ -491,7 +667,14 @@ bool LayerSupportHandle::IsInstanceNormalizationSupported(
const InstanceNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsInstanceNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::InstanceNormalization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
@@ -499,7 +682,14 @@ bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsL2NormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::L2Normalization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
@@ -508,11 +698,14 @@ bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
const LogicalBinaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsLogicalBinarySupported(input0,
- input1,
- output,
- descriptor,
- reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::LogicalBinary,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
@@ -520,7 +713,14 @@ bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsLogicalUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::ElementwiseUnary,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
@@ -528,7 +728,14 @@ bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
const LogSoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsLogSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::LogSoftmax,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
@@ -542,16 +749,14 @@ bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsLstmSupported(input,
- outputStateIn,
- cellStateIn,
- scratchBuffer,
- outputStateOut,
- cellStateOut,
- output,
- descriptor,
- paramsInfo,
- reasonIfUnsupported);
+ TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Lstm,
+ infos,
+ descriptor,
+ paramsInfo,
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
@@ -559,7 +764,14 @@ bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsMaximumSupported(input0, input1, output, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Maximum,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
@@ -567,21 +779,42 @@ bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
const MeanDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsMeanSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Mean,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsMemCopySupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::MemCopy,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsMemImportSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::MemImport,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
@@ -589,7 +822,14 @@ bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Merge,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
@@ -597,7 +837,14 @@ bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsMinimumSupported(input0, input1, output, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Minimum,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
@@ -605,7 +852,14 @@ bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsMultiplicationSupported(input0, input1, output, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Multiplication,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
@@ -613,13 +867,27 @@ bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
const NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Normalization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsOutputSupported(output, reasonIfUnsupported.value());
+ TensorInfos infos{output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Output,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
@@ -627,7 +895,14 @@ bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsPadSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Pad,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
@@ -635,7 +910,14 @@ bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
const PermuteDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsPermuteSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Permute,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
@@ -643,7 +925,14 @@ bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
const Pooling2dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Pooling2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
@@ -651,14 +940,28 @@ bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
const Pooling3dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsPooling3dSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Pooling3d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsPreCompiledSupported(input, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::PreCompiled,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
@@ -666,14 +969,28 @@ bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsPreluSupported(input, alpha, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, alpha, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Prelu,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsQuantizeSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Quantize,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
@@ -686,15 +1003,14 @@ bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsQLstmSupported(input,
- previousOutputIn,
- previousCellStateIn,
- outputStateOut,
- cellStateOut,
- output,
+ TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::QLstm,
+ infos,
descriptor,
paramsInfo,
- reasonIfUnsupported);
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
@@ -705,20 +1021,28 @@ bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
const QuantizedLstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsQuantizedLstmSupported(input,
- previousCellStateIn,
- previousOutputIn,
- cellStateOut,
- output,
- paramsInfo,
- reasonIfUnsupported);
+ TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::QuantizedLstm,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ paramsInfo,
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsRankSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Rank,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
@@ -726,7 +1050,14 @@ bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
const ReduceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsReduceSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Reduce,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
@@ -734,7 +1065,14 @@ bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Reshape,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
@@ -742,14 +1080,28 @@ bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Resize,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsShapeSupported(input, output, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Shape,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
@@ -757,7 +1109,14 @@ bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
const SliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Slice,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
@@ -765,7 +1124,14 @@ bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Softmax,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
@@ -773,7 +1139,14 @@ bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsSpaceToBatchNdSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::SpaceToBatchNd,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
@@ -781,7 +1154,14 @@ bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsSpaceToDepthSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::SpaceToDepth,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
@@ -789,7 +1169,18 @@ bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsSplitterSupported(input, outputs, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input};
+ for (TensorInfo outInfo : outputs)
+ {
+ infos.push_back(outInfo);
+ }
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Splitter,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
@@ -797,7 +1188,19 @@ bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>&
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsStackSupported(inputs, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos;
+ for (const TensorInfo* inputInfo : inputs)
+ {
+ infos.push_back(*inputInfo);
+ }
+ infos.push_back(output);
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Stack,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
@@ -805,7 +1208,22 @@ bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>
const StandInDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsStandInSupported(inputs, outputs, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos;
+ for (const TensorInfo* inputInfo : inputs)
+ {
+ infos.push_back(*inputInfo);
+ }
+ for (const TensorInfo* outputInfo : outputs)
+ {
+ infos.push_back(*outputInfo);
+ }
+
+ return m_LayerSupport->IsLayerSupported(LayerType::StandIn,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
@@ -814,7 +1232,14 @@ bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
const StridedSliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsStridedSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::StridedSlice,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
@@ -822,7 +1247,14 @@ bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsSubtractionSupported(input0, input1, output, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Subtraction,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
@@ -831,7 +1263,14 @@ bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
const TensorInfo& output1,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsSwitchSupported(input0, input1, output0, output1, reasonIfUnsupported.value());
+ TensorInfos infos{input0, input1, output0, output1};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Switch,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsTransposeConvolution2dSupported(
@@ -842,12 +1281,15 @@ bool LayerSupportHandle::IsTransposeConvolution2dSupported(
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsTransposeConvolution2dSupported(input,
- output,
- descriptor,
- weights,
- biases,
- reasonIfUnsupported.value());
+ TensorInfo biasesVal = biases.has_value() ? biases.value() : TensorInfo();
+ TensorInfos infos{input, output, weights, biasesVal};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::TransposeConvolution2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
@@ -855,7 +1297,14 @@ bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
const TransposeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsTransposeSupported(input, output, descriptor, reasonIfUnsupported.value());
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Transpose,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
@@ -868,15 +1317,16 @@ bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo&
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported)
{
- return m_LayerSupport->IsUnidirectionalSequenceLstmSupported(input,
- outputStateIn,
- cellStateIn,
- output,
- hiddenStateOutput,
- cellStateOutput,
- descriptor,
- paramsInfo,
- reasonIfUnsupported);
+ TensorInfo hiddenStateOutputVal = hiddenStateOutput.has_value() ? hiddenStateOutput.value() : TensorInfo();
+ TensorInfo cellStateOutputVal = cellStateOutput.has_value() ? cellStateOutput.value() : TensorInfo();
+ TensorInfos infos{input, outputStateIn, cellStateIn, hiddenStateOutputVal, cellStateOutputVal, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::UnidirectionalSequenceLstm,
+ infos,
+ descriptor,
+ paramsInfo,
+ EmptyOptional(),
+ reasonIfUnsupported.value());
}
} \ No newline at end of file
diff --git a/src/armnn/ILayerSupport.cpp b/src/armnn/ILayerSupport.cpp
new file mode 100644
index 0000000000..76926d9558
--- /dev/null
+++ b/src/armnn/ILayerSupport.cpp
@@ -0,0 +1,586 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Types.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/backends/ILayerSupport.hpp>
+
+namespace armnn
+{
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+// IsLayerSupport() forwards to the deprecated virtual methods depending on input LayerType.
+// Allows backends continue to behave as before maintaining backward compatibility.
+bool ILayerSupport::IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ switch (type)
+ {
+ case LayerType::Activation:
+ return IsActivationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Addition:
+ return IsAdditionSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::ArgMinMax:
+ return IsArgMinMaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchNormalization:
+ return IsBatchNormalizationSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchToSpaceNd:
+ return IsBatchToSpaceNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Comparison:
+ {
+ return IsComparisonSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Concat:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < (infos.size() - 1); i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsConcatSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Constant:
+ return IsConstantSupported(infos[0],
+ reasonIfUnsupported);
+ case LayerType::ConvertBf16ToFp32:
+ return IsConvertBf16ToFp32Supported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::ConvertFp16ToFp32:
+ return IsConvertFp16ToFp32Supported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::ConvertFp32ToBf16:
+ return IsConvertFp32ToBf16Supported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::ConvertFp32ToFp16:
+ return IsConvertFp32ToFp16Supported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::Convolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution2d "
+ "TensorInfos. TensorInfos should be of format: "
+ "{input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Debug:
+ return IsDebugSupported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::DepthToSpace:
+ return IsDepthToSpaceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::DepthwiseConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d "
+ "TensorInfos. TensorInfos should be of format: "
+ "{input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Dequantize:
+ return IsDequantizeSupported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::DetectionPostProcess:
+ return IsDetectionPostProcessSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Division:
+ return IsDivisionSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::ElementwiseUnary:
+ return IsElementwiseUnarySupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::FakeQuantization:
+ return IsFakeQuantizationSupported(infos[0],
+ *(PolymorphicDowncast<const FakeQuantizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Fill:
+ return IsFillSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Floor:
+ return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::FullyConnected:
+ return IsFullyConnectedSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Gather:
+ return IsGatherSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::InstanceNormalization:
+ return IsInstanceNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::L2Normalization:
+ return IsL2NormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const L2NormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogicalBinary:
+ return IsLogicalBinarySupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogSoftmax:
+ return IsLogSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Lstm:
+ return IsLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::QLstm:
+ return IsQLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Map:
+ return true;
+ case LayerType::Maximum:
+ return IsMaximumSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Mean:
+ return IsMeanSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::MemCopy:
+ return IsMemCopySupported(std::move(infos[0]),
+ std::move(infos[1]),
+ reasonIfUnsupported);
+ case LayerType::MemImport:
+ return IsMemImportSupported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::Merge:
+ return IsMergeSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Minimum:
+ return IsMinimumSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Multiplication:
+ return IsMultiplicationSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Normalization:
+ return IsNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Pad:
+ return IsPadSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Permute:
+ return IsPermuteSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Pooling2d:
+ return IsPooling2dSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::PreCompiled:
+ return IsPreCompiledSupported(infos[0],
+ *(PolymorphicDowncast<const PreCompiledDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Prelu:
+ return IsPreluSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Quantize:
+ return IsQuantizeSupported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::QuantizedLstm:
+ return IsQuantizedLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ quantizedLstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Reshape:
+ return IsReshapeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Rank:
+ return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Resize:
+ return IsResizeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Reduce:
+ return IsReduceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Slice:
+ return IsSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Softmax:
+ return IsSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToBatchNd:
+ return IsSpaceToBatchNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToDepth:
+ return IsSpaceToDepthSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Splitter:
+ {
+ std::vector<TensorInfo> outputInfos;
+ for (uint32_t i = 1; i < infos.size(); i++)
+ {
+ outputInfos.push_back(infos[i]);
+ }
+ return IsSplitterSupported(infos[0],
+ {outputInfos.begin(), outputInfos.end()},
+ *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Stack:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < infos.size() - 1; i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsStackSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::StandIn:
+ {
+ auto desc = *(PolymorphicDowncast<const StandInDescriptor*>(&descriptor));
+
+ if (infos.size() != (desc.m_NumInputs + desc.m_NumOutputs))
+ {
+ throw InvalidArgumentException("Number of StandIn layer TensorInfos does not equal "
+ "the combined number of input and output slots assigned "
+ "to the StandIn descriptor");
+ }
+
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < desc.m_NumInputs; i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ std::vector<const TensorInfo*> outputInfos;
+ for (uint32_t i = desc.m_NumInputs; i < infos.size(); i++)
+ {
+ outputInfos.push_back(&infos[i]);
+ }
+
+ return IsStandInSupported(inputInfos,
+ outputInfos,
+ desc,
+ reasonIfUnsupported);
+ }
+ case LayerType::StridedSlice:
+ return IsStridedSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Subtraction:
+ return IsSubtractionSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Switch:
+ return IsSwitchSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ case LayerType::Transpose:
+ return IsTransposeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::TransposeConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of TransposeConvolution2d "
+ "TensorInfos. TensorInfos should be of format: "
+ "{input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Unmap:
+ return true;
+ case LayerType::Cast:
+ return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Shape:
+ return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::UnidirectionalSequenceLstm:
+ {
+ if (infos.size() != 6)
+ {
+                throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
+ "should be of format: {input, outputStateIn, cellStateIn, "
+ "hiddenStateOutputVal, cellStateOutputVal, output}");
+ }
+ auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
+
+ bool isHiddenStateOutputOptional = (infos[4] == TensorInfo());
+ bool isCellStateOutput = (infos[5] == TensorInfo());
+ if (isHiddenStateOutputOptional && isCellStateOutput)
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ EmptyOptional(),
+ EmptyOptional(),
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ else if (isHiddenStateOutputOptional)
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ EmptyOptional(),
+ infos[5],
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ else if (isCellStateOutput)
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ EmptyOptional(),
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::ChannelShuffle:
+ return IsChannelShuffleSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Convolution3d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution3d "
+ "TensorInfos. TensorInfos should be of format: "
+ "{input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Pooling3d:
+ return IsPooling3dSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ default:
+ return false;
+ }
+}
+ARMNN_NO_DEPRECATE_WARN_END
+}
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 4cb7492e3a..b03f59ea26 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -66,6 +66,8 @@ bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input
return input0.GetDataType() == input1.GetDataType();
}
+using TensorInfos = std::vector<TensorInfo>;
+
bool IsActivationSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -73,7 +75,14 @@ bool IsActivationSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsActivationSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Activation,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsAdditionSupported(const BackendId& backend,
@@ -88,7 +97,14 @@ bool IsAdditionSupported(const BackendId& backend,
return false;
}
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Addition,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsArgMinMaxSupported(const BackendId& backend,
@@ -98,7 +114,14 @@ bool IsArgMinMaxSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsArgMinMaxSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::ArgMinMax,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsBatchNormalizationSupported(const BackendId& backend,
@@ -112,15 +135,14 @@ bool IsBatchNormalizationSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
+ TensorInfos infos{input, output, mean, var, beta, gamma};
FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsBatchNormalizationSupported,
- input,
- output,
- mean,
- var,
- beta,
- gamma,
- descriptor);
+ IsLayerSupported,
+ LayerType::BatchNormalization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsBatchToSpaceNdSupported(const BackendId& backend,
@@ -130,11 +152,14 @@ bool IsBatchToSpaceNdSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
+ TensorInfos infos{input, output};
FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsBatchToSpaceNdSupported,
- input,
- output,
- descriptor);
+ IsLayerSupported,
+ LayerType::BatchToSpaceNd,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsConcatSupported(const BackendId& backend,
@@ -146,7 +171,20 @@ bool IsConcatSupported(const BackendId& backend,
{
ARMNN_ASSERT(inputs.size() > 0);
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
+ TensorInfos infos;
+ for (const TensorInfo* inputInfo : inputs)
+ {
+ infos.push_back(*inputInfo);
+ }
+ infos.push_back(output);
+
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Concat,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsConstantSupported(const BackendId& backend,
@@ -154,7 +192,14 @@ bool IsConstantSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsConstantSupported, output);
+ TensorInfos infos{output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Constant,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsConvertFp16ToFp32Supported(const BackendId& backend,
@@ -163,7 +208,14 @@ bool IsConvertFp16ToFp32Supported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp16ToFp32Supported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::ConvertFp16ToFp32,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsConvertFp32ToFp16Supported(const BackendId& backend,
@@ -172,7 +224,14 @@ bool IsConvertFp32ToFp16Supported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvertFp32ToFp16Supported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::ConvertFp32ToFp16,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsConvolution2dSupported(const BackendId& backend,
@@ -184,7 +243,14 @@ bool IsConvolution2dSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsConvolution2dSupported, input, output, descriptor, weights, biases);
+ TensorInfos infos{input, output, weights, biases.value()};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Convolution2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsDebugSupported(const BackendId& backend,
@@ -193,7 +259,14 @@ bool IsDebugSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Debug,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsDepthwiseConvolutionSupported(const BackendId& backend,
@@ -205,28 +278,14 @@ bool IsDepthwiseConvolutionSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- if (descriptor.m_DilationX == 1 && descriptor.m_DilationY == 1)
- {
- // Pre 19.05 ArmNN did not have the dilation parameters.
- // This version of IsDepthwiseConvolutionSupported is called for backwards-compatibility
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsDepthwiseConvolutionSupported,
- input,
- output,
- descriptor,
- weights,
- biases);
- }
- else
- {
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsDilatedDepthwiseConvolutionSupported,
- input,
- output,
- descriptor,
- weights,
- biases);
- }
+ TensorInfos infos{input, output, weights, biases.value()};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::DepthwiseConvolution2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsDequantizeSupported(const BackendId& backend,
@@ -235,7 +294,14 @@ bool IsDequantizeSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Dequantize,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsDetectionPostProcessSupported(const BackendId& backend,
@@ -252,7 +318,14 @@ bool IsDivisionSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsDivisionSupported, input0, input1, output);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Division,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsEqualSupported(const BackendId& backend,
@@ -262,12 +335,14 @@ bool IsEqualSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
+ TensorInfos infos{input0, input1, output};
FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsComparisonSupported,
- input0,
- input1,
- output,
- ComparisonDescriptor(ComparisonOperation::Equal));
+ IsLayerSupported,
+ LayerType::Comparison,
+ infos,
+ ComparisonDescriptor(ComparisonOperation::Equal),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsFakeQuantizationSupported(const BackendId& backend,
@@ -276,7 +351,14 @@ bool IsFakeQuantizationSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsFakeQuantizationSupported, input, descriptor);
+ TensorInfos infos{input};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::FakeQuantization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsFloorSupported(const BackendId& backend,
@@ -291,8 +373,16 @@ bool IsFloorSupported(const BackendId& backend,
return false;
}
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsFloorSupported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Floor,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
+
bool IsFullyConnectedSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -302,7 +392,14 @@ bool IsFullyConnectedSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
+ TensorInfos infos{input, output, weights, biases};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::FullyConnected,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsGatherSupported(const BackendId& backend,
@@ -313,7 +410,14 @@ bool IsGatherSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsGatherSupported, input0, input1, output, descriptor);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Gather,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsGreaterSupported(const BackendId& backend,
@@ -323,12 +427,14 @@ bool IsGreaterSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
+ TensorInfos infos{input0, input1, output};
FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsComparisonSupported,
- input0,
- input1,
- output,
- ComparisonDescriptor(ComparisonOperation::Greater));
+ IsLayerSupported,
+ LayerType::Comparison,
+ infos,
+ ComparisonDescriptor(ComparisonOperation::Greater),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsInputSupported(const BackendId& backend,
@@ -336,7 +442,14 @@ bool IsInputSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsInputSupported, input);
+ TensorInfos infos{input};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Input,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
@@ -347,7 +460,14 @@ bool IsL2NormalizationSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsL2NormalizationSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::L2Normalization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
@@ -358,9 +478,14 @@ bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const Te
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsLstmSupported, input, outputStateIn, cellStateIn,
- scratchBuffer, outputStateOut, cellStateOut,
- output, descriptor, paramsInfo);
+ TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Lstm,
+ infos,
+ descriptor,
+ paramsInfo,
+ EmptyOptional());
}
bool IsMaximumSupported(const BackendId& backend,
@@ -370,7 +495,14 @@ bool IsMaximumSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMaximumSupported, input0, input1, output);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Maximum,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsMeanSupported(const BackendId& backend,
@@ -380,7 +512,14 @@ bool IsMeanSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMeanSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Mean,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsMemCopySupported(const BackendId &backend,
@@ -389,7 +528,14 @@ bool IsMemCopySupported(const BackendId &backend,
char *reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::MemCopy,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsMemImportSupported(const BackendId &backend,
@@ -398,7 +544,14 @@ bool IsMemImportSupported(const BackendId &backend,
char *reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemImportSupported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::MemImport,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsMergeSupported(const BackendId& backend,
@@ -408,7 +561,14 @@ bool IsMergeSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Merge,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsMinimumSupported(const BackendId& backend,
@@ -418,7 +578,14 @@ bool IsMinimumSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Minimum,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsMultiplicationSupported(const BackendId& backend,
@@ -428,7 +595,14 @@ bool IsMultiplicationSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsMultiplicationSupported, input0, input1, output);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Multiplication,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsNormalizationSupported(const BackendId& backend,
@@ -438,7 +612,14 @@ bool IsNormalizationSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsNormalizationSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Normalization,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsOutputSupported(const BackendId& backend,
@@ -446,7 +627,14 @@ bool IsOutputSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsOutputSupported, output);
+ TensorInfos infos{output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Output,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());;
}
bool IsPadSupported(const BackendId& backend,
@@ -456,8 +644,14 @@ bool IsPadSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
-
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Pad,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsQuantizeSupported(const BackendId& backend,
@@ -466,7 +660,14 @@ bool IsQuantizeSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Quantize,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsQLstmSupported(const BackendId& backend,
@@ -482,8 +683,14 @@ bool IsQLstmSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsQLstmSupported, input, previousOutputIn, previousCellStateIn,
- outputStateOut, cellStateOut, output, descriptor, paramsInfo);
+ TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::QLstm,
+ infos,
+ descriptor,
+ paramsInfo,
+ EmptyOptional());
}
bool IsQuantizedLstmSupported(const BackendId& backend,
@@ -497,8 +704,14 @@ bool IsQuantizedLstmSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizedLstmSupported, input, previousCellStateIn, previousOutputIn,
- cellStateOut, output, paramsInfo);
+ TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::QuantizedLstm,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ paramsInfo);
}
@@ -509,7 +722,14 @@ bool IsPermuteSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsPermuteSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Permute,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsPooling2dSupported(const BackendId& backend,
@@ -519,7 +739,14 @@ bool IsPooling2dSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsPooling2dSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Pooling2d,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsPreluSupported(const BackendId& backend,
@@ -529,7 +756,14 @@ bool IsPreluSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output);
+ TensorInfos infos{input, alpha, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Prelu,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsReduceSupported(const BackendId& backend,
@@ -539,7 +773,14 @@ bool IsReduceSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsReduceSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Reduce,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsReshapeSupported(const BackendId& backend,
@@ -549,7 +790,14 @@ bool IsReshapeSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Reshape,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsResizeSupported(const BackendId& backend,
@@ -559,7 +807,14 @@ bool IsResizeSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsResizeSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Resize,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsSoftmaxSupported(const BackendId& backend,
@@ -569,7 +824,14 @@ bool IsSoftmaxSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSoftmaxSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Softmax,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsSpaceToBatchNdSupported(const BackendId& backend,
@@ -579,7 +841,14 @@ bool IsSpaceToBatchNdSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::SpaceToBatchNd,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsSpaceToDepthSupported(const BackendId& backend,
@@ -589,7 +858,14 @@ bool IsSpaceToDepthSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToDepthSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::SpaceToDepth,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsSplitterSupported(const BackendId& backend,
@@ -599,7 +875,19 @@ bool IsSplitterSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
+ TensorInfos infos{input};
+ for (TensorInfo outInfo : outputs)
+ {
+ infos.push_back(outInfo);
+ }
+
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Splitter,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsStridedSliceSupported(const BackendId& backend,
@@ -609,7 +897,14 @@ bool IsStridedSliceSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsStridedSliceSupported, input, output, descriptor);
+ TensorInfos infos{input, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::StridedSlice,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsSubtractionSupported(const BackendId& backend,
@@ -619,7 +914,14 @@ bool IsSubtractionSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output);
+ TensorInfos infos{input0, input1, output};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Subtraction,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
bool IsSwitchSupported(const BackendId& backend,
@@ -630,7 +932,14 @@ bool IsSwitchSupported(const BackendId& backend,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsSwitchSupported, input0, input1, output0, output1);
+ TensorInfos infos{input0, input1, output0, output1};
+ FORWARD_LAYER_SUPPORT_FUNC(backend,
+ IsLayerSupported,
+ LayerType::Switch,
+ infos,
+ BaseDescriptor(),
+ EmptyOptional(),
+ EmptyOptional());
}
} // namespace armnn
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 900d4d6335..a3949affce 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -142,6 +142,29 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
class MockLayerSupport : public LayerSupportBase
{
public:
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
+ const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
+ Optional<std::string&> reasonIfUnsupported) const override
+ {
+ switch (type)
+ {
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Activation:
+ return IsActivationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ default:
+ return false;
+ }
+ }
+
bool IsInputSupported(const TensorInfo& /*input*/,
Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 220590e197..89a0772602 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -4,13 +4,13 @@
//
#include <armnn/Deprecated.hpp>
-#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>
#include <backendsCommon/LayerSupportBase.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
namespace
{
@@ -37,6 +37,51 @@ bool DefaultLayerSupport(const char* func,
namespace armnn
{
+bool LayerSupportBase::IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>&,
+ const Optional<QuantizedLstmInputParamsInfo>&,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ switch(type)
+ {
+ case LayerType::MemCopy:
+ return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::MemImport:
+ return IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::StandIn:
+ {
+ auto desc = *(PolymorphicDowncast<const StandInDescriptor*>(&descriptor));
+
+ if (infos.size() != (desc.m_NumInputs + desc.m_NumOutputs))
+ {
+ throw InvalidArgumentException("Number of StandIn layer TensorInfos does not equal "
+ "the combined number of input and output slots assigned "
+ "to the StandIn descriptor");
+ }
+
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < desc.m_NumInputs; i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ std::vector<const TensorInfo*> outputInfos;
+ for (uint32_t i = desc.m_NumInputs; i < infos.size(); i++)
+ {
+ outputInfos.push_back(&infos[i]);
+ }
+
+ return IsStandInSupported(inputInfos,
+ outputInfos,
+ desc,
+ reasonIfUnsupported);
+ }
+ default:
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+ }
+}
+
bool LayerSupportBase::IsActivationSupported(const TensorInfo&, // input
const TensorInfo&, //output
const ActivationDescriptor&, // descriptor
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index ef947aaa3b..3d9c968d06 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -13,21 +13,33 @@ namespace armnn
class LayerSupportBase : public ILayerSupport
{
public:
+
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo = EmptyOptional(),
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo = EmptyOptional(),
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsArgMinMaxSupported(const TensorInfo& input,
const TensorInfo& output,
const ArgMinMaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsBatchNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& mean,
@@ -37,31 +49,37 @@ public:
const BatchNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsBatchToSpaceNdSupported(const TensorInfo& input,
const TensorInfo& output,
const BatchToSpaceNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsCastSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsChannelShuffleSupported(const TensorInfo& input,
const TensorInfo& output,
const ChannelShuffleDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsComparisonSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const ComparisonDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -69,6 +87,7 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -77,11 +96,13 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsConvertFp32ToFp16Supported(
const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsConvolution2dSupported(const TensorInfo& input,
const TensorInfo& output,
const Convolution2dDescriptor& descriptor,
@@ -89,6 +110,7 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsConvolution3dSupported(const TensorInfo& input,
const TensorInfo& output,
const Convolution3dDescriptor& descriptor,
@@ -96,15 +118,18 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsDepthToSpaceSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthToSpaceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
@@ -112,6 +137,7 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsDequantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -126,6 +152,7 @@ public:
const DetectionPostProcessDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
@@ -134,29 +161,35 @@ public:
Optional<std::string&> reasonIfUnsupported =
EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsElementwiseUnarySupported(const TensorInfo& input,
const TensorInfo& output,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
virtual bool IsFillSupported(const TensorInfo& input,
const TensorInfo& output,
const FillDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsFullyConnectedSupported(const TensorInfo& input,
const TensorInfo& output,
const TensorInfo& weights,
@@ -164,42 +197,50 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const GatherDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsInstanceNormalizationSupported(
const TensorInfo& input,
const TensorInfo& output,
const InstanceNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsL2NormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsLogicalBinarySupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const LogicalBinaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsLogicalUnarySupported(const TensorInfo& input,
const TensorInfo& output,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsLstmSupported(const TensorInfo& input,
const TensorInfo& outputStateIn,
const TensorInfo& cellStateIn,
@@ -211,11 +252,13 @@ public:
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsMaximumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsMeanSupported(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
@@ -234,57 +277,69 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsMinimumSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsPadSupported(const TensorInfo& input,
const TensorInfo& output,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsPermuteSupported(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsPooling2dSupported(const TensorInfo& input,
const TensorInfo& output,
const Pooling2dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsPooling3dSupported(const TensorInfo& input,
const TensorInfo& output,
const Pooling3dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsPreCompiledSupported(const TensorInfo& input,
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsPreluSupported(const TensorInfo& input,
const TensorInfo& alpha,
const TensorInfo& output,
Optional<std::string &> reasonIfUnsupported) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsQuantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsQLstmSupported(const TensorInfo& input,
const TensorInfo& previousOutputIn,
const TensorInfo& previousCellStateIn,
@@ -303,20 +358,24 @@ public:
const QuantizedLstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsRankSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsReduceSupported(const TensorInfo& input,
const TensorInfo& output,
const ReduceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsReshapeSupported(const TensorInfo& input,
const TensorInfo& output,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsResizeSupported(const TensorInfo& input,
const TensorInfo& output,
const ResizeDescriptor& descriptor,
@@ -326,31 +385,37 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const SliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsSpaceToBatchNdSupported(const TensorInfo& input,
const TensorInfo& output,
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsSpaceToDepthSupported(const TensorInfo& input,
const TensorInfo& output,
const SpaceToDepthDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsSplitterSupported(const TensorInfo& input,
const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
const TensorInfo& output,
const StackDescriptor& descriptor,
@@ -361,22 +426,26 @@ public:
const StandInDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsSwitchSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output0,
const TensorInfo& output1,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsTransposeConvolution2dSupported(
const TensorInfo& input,
const TensorInfo& output,
@@ -385,11 +454,13 @@ public:
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsTransposeSupported(const TensorInfo& input,
const TensorInfo& output,
const TransposeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "22.08")
bool IsUnidirectionalSequenceLstmSupported(
const TensorInfo& input,
const TensorInfo& outputStateIn,
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index 49e90791bf..0d06595ebe 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -1450,12 +1450,11 @@ void CreateReferenceDynamicBackendTestImpl()
TensorInfo outputInfo(outputShape, DataType::Float32);
TensorInfo weightInfo(weightShape, DataType::Float32);
Convolution2dDescriptor convolution2dDescriptor;
+ std::vector<TensorInfo> infos = {inputInfo, outputInfo, weightInfo, TensorInfo()};
bool referenceConvolution2dSupported =
- referenceLayerSupport->IsConvolution2dSupported(inputInfo,
- outputInfo,
- convolution2dDescriptor,
- weightInfo,
- EmptyOptional());
+ referenceLayerSupport->IsLayerSupported(LayerType::Convolution2d,
+ infos,
+ convolution2dDescriptor);
CHECK(referenceConvolution2dSupported);
// Test the backend instance by creating a workload
@@ -1535,12 +1534,11 @@ void CreateSampleDynamicBackendTestImpl()
TensorInfo outputInfo(outputShape, DataType::Float32);
TensorInfo weightInfo(weightShape, DataType::Float32);
Convolution2dDescriptor convolution2dDescriptor;
- bool sampleConvolution2dSupported =
- sampleLayerSupport->IsConvolution2dSupported(inputInfo,
- outputInfo,
- convolution2dDescriptor,
- weightInfo,
- EmptyOptional());
+ std::vector<TensorInfo> infos = {inputInfo, outputInfo, weightInfo, TensorInfo()};
+    bool sampleConvolution2dSupported =
+            sampleLayerSupport->IsLayerSupported(LayerType::Convolution2d,
+                                                 infos,
+                                                 convolution2dDescriptor);
CHECK(!sampleConvolution2dSupported);
// Test the backend instance by creating a workload
diff --git a/src/backends/backendsCommon/test/MockBackend.hpp b/src/backends/backendsCommon/test/MockBackend.hpp
index 3a5e79a224..df133dfed2 100644
--- a/src/backends/backendsCommon/test/MockBackend.hpp
+++ b/src/backends/backendsCommon/test/MockBackend.hpp
@@ -172,6 +172,55 @@ public:
class MockLayerSupport : public LayerSupportBase
{
public:
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
+ const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
+ Optional<std::string&> reasonIfUnsupported) const override
+ {
+ switch(type)
+ {
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Addition:
+ return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Convolution2d:
+ {
+ if (infos.size() != 4)
+ {
+                    throw InvalidArgumentException("Invalid number of Convolution2d "
+                                                   "TensorInfos. TensorInfos should be of format: "
+ "{input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ default:
+ return false;
+ }
+ }
+
bool IsInputSupported(const TensorInfo& /*input*/,
Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
diff --git a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
index 75e637efdf..380ce4a3f5 100644
--- a/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
+++ b/src/backends/backendsCommon/test/mockBackend/MockImportLayerSupport.hpp
@@ -14,6 +14,30 @@ namespace armnn
class MockImportLayerSupport : public LayerSupportBase
{
public:
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& /*descriptor*/,
+ const Optional<LstmInputParamsInfo>& /*lstmParamsInfo*/,
+ const Optional<QuantizedLstmInputParamsInfo>& /*quantizedLstmParamsInfo*/,
+ Optional<std::string&> reasonIfUnsupported) const override
+ {
+ switch(type)
+ {
+ case LayerType::Addition:
+ return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::MemCopy:
+ return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::MemImport:
+ return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+ default:
+ return false;
+ }
+ }
+
bool IsAdditionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index afcaf566a9..e5204e4d5b 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -7,7 +7,6 @@
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"
-#include <armnn/Descriptors.hpp>
#include <armnn/BackendRegistry.hpp>
#include <InternalTypes.hpp>
@@ -177,6 +176,415 @@ ClLayerSupport::ClLayerSupport()
{
}
+bool ClLayerSupport::IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ switch (type)
+ {
+ case LayerType::Activation:
+ return IsActivationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Addition:
+ return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ArgMinMax:
+ return IsArgMinMaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchNormalization:
+ return IsBatchNormalizationSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchToSpaceNd:
+ return IsBatchToSpaceNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Comparison:
+ return IsComparisonSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Concat:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < (infos.size() - 1); i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsConcatSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Constant:
+ return IsConstantSupported(infos[0], reasonIfUnsupported);
+ case LayerType::ConvertFp16ToFp32:
+ return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp32ToFp16:
+ return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Convolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::DepthToSpace:
+ return IsDepthToSpaceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::DepthwiseConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Dequantize:
+ return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Division:
+ return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ElementwiseUnary:
+ return IsElementwiseUnarySupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Fill:
+ return IsFillSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Floor:
+ return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::FullyConnected:
+ return IsFullyConnectedSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Gather:
+ return IsGatherSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::InstanceNormalization:
+ return IsInstanceNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::L2Normalization:
+ return IsL2NormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogicalBinary:
+ return IsLogicalBinarySupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogSoftmax:
+ return IsLogSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Lstm:
+ return IsLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::QLstm:
+ return IsQLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Maximum:
+ return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Mean:
+ return IsMeanSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Minimum:
+ return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Multiplication:
+ return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Normalization:
+ return IsNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Pad:
+ return IsPadSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Permute:
+ return IsPermuteSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Pooling2d:
+ return IsPooling2dSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Prelu:
+ return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Quantize:
+ return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::QuantizedLstm:
+ return IsQuantizedLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ quantizedLstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Reshape:
+ return IsReshapeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Resize:
+ return IsResizeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Reduce:
+ return IsReduceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Slice:
+ return IsSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Softmax:
+ return IsSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToBatchNd:
+ return IsSpaceToBatchNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToDepth:
+ return IsSpaceToDepthSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Splitter:
+ {
+ std::vector<TensorInfo> outputInfos;
+ for (uint32_t i = 1; i < infos.size(); i++)
+ {
+ outputInfos.push_back(infos[i]);
+ }
+ return IsSplitterSupported(infos[0],
+ {outputInfos.begin(), outputInfos.end()},
+ *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Stack:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < infos.size() - 1; i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsStackSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::StridedSlice:
+ return IsStridedSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Subtraction:
+ return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Transpose:
+ return IsTransposeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::TransposeConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Cast:
+ return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ChannelShuffle:
+ return IsChannelShuffleSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Convolution3d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::MemCopy:
+ return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::MemImport:
+ return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Map:
+ return true;
+ case LayerType::Unmap:
+ return true;
+ case LayerType::Merge:
+ return LayerSupportBase::IsMergeSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Rank:
+ return true;
+ case LayerType::Shape:
+ return LayerSupportBase::IsShapeSupported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::ConvertBf16ToFp32:
+ return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ case LayerType::ConvertFp32ToBf16:
+ return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ default:
+ // layers not supported in cl by default:
+ // debug, detectionpostprocess, fakequantization, precompiled,
+ // standin, switch, unidirectionalsequencelstm, pooling3d
+ return false;
+ }
+}
+
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -286,7 +694,7 @@ bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const ConcatDescriptor& descriptor,
+ const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index e75aedaa6a..0300fc05c3 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -18,6 +18,13 @@ public:
ClLayerSupport();
~ClLayerSupport() {}
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -64,7 +71,7 @@ public:
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const ConcatDescriptor& descriptor,
+ const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsConstantSupported(const TensorInfo& output,
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index d5dd238bd8..2b2229a4de 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -7,7 +7,6 @@
#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"
-#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
@@ -146,6 +145,424 @@ NeonLayerSupport::NeonLayerSupport()
{
}
+bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ switch (type)
+ {
+ case LayerType::Activation:
+ return IsActivationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Addition:
+ return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ArgMinMax:
+ return IsArgMinMaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchNormalization:
+ return IsBatchNormalizationSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchToSpaceNd:
+ return IsBatchToSpaceNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Comparison:
+ return IsComparisonSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Concat:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < (infos.size() - 1); i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsConcatSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Constant:
+ return IsConstantSupported(infos[0], reasonIfUnsupported);
+ case LayerType::ConvertBf16ToFp32:
+ return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp16ToFp32:
+ return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp32ToBf16:
+ return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp32ToFp16:
+ return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Convolution2d:
+ {
+ if (infos.size() != 4)
+ {
+                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+                                               "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::DepthToSpace:
+ return IsDepthToSpaceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::DepthwiseConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Dequantize:
+ return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Division:
+ return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ElementwiseUnary:
+ return IsElementwiseUnarySupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Fill:
+ return IsFillSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Floor:
+ return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::FullyConnected:
+ return IsFullyConnectedSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Gather:
+ return IsGatherSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::InstanceNormalization:
+ return IsInstanceNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::L2Normalization:
+ return IsL2NormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogicalBinary:
+ return IsLogicalBinarySupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogSoftmax:
+ return IsLogSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Lstm:
+ return IsLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::QLstm:
+ return IsQLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Maximum:
+ return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Mean:
+ return IsMeanSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Minimum:
+ return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Multiplication:
+ return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Normalization:
+ return IsNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Pad:
+ return IsPadSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Permute:
+ return IsPermuteSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Pooling2d:
+ return IsPooling2dSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Prelu:
+ return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Quantize:
+ return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::QuantizedLstm:
+ return IsQuantizedLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ quantizedLstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Reshape:
+ return IsReshapeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Resize:
+ return IsResizeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Reduce:
+ return IsReduceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Slice:
+ return IsSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Softmax:
+ return IsSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToBatchNd:
+ return IsSpaceToBatchNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToDepth:
+ return IsSpaceToDepthSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Splitter:
+ {
+ std::vector<TensorInfo> outputInfos;
+ for (uint32_t i = 1; i < infos.size(); i++)
+ {
+ outputInfos.push_back(infos[i]);
+ }
+ return IsSplitterSupported(infos[0],
+ {outputInfos.begin(), outputInfos.end()},
+ *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Stack:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < infos.size() - 1; i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsStackSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::StridedSlice:
+ return IsStridedSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Subtraction:
+ return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Transpose:
+ return IsTransposeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::TransposeConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Cast:
+ return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ChannelShuffle:
+ return IsChannelShuffleSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Convolution3d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::MemCopy:
+ return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::MemImport:
+ return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::DetectionPostProcess:
+ {
+ auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
+ return LayerSupportBase::IsDetectionPostProcessSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ desc,
+ reasonIfUnsupported);
+ }
+ case LayerType::Map:
+ return true;
+ case LayerType::Unmap:
+ return true;
+ case LayerType::Merge:
+ return LayerSupportBase::IsMergeSupported(infos[0],
+ infos[1],
+ infos[2],
+ reasonIfUnsupported);
+ case LayerType::Rank:
+ return true;
+ case LayerType::Shape:
+ return LayerSupportBase::IsShapeSupported(infos[0],
+ infos[1],
+ reasonIfUnsupported);
+ default:
+ // layers not supported in neon by default:
+ // debug, fakequantization, precompiled, standin,
+ // switch, unidirectionalsequencelstm, pooling3d
+ return false;
+ }
+}
+
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -256,7 +673,7 @@ bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const ConcatDescriptor& descriptor,
+ const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 16507c595e..afa9b419e6 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -19,6 +19,13 @@ public:
~NeonLayerSupport() {}
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -65,7 +72,7 @@ public:
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const ConcatDescriptor& descriptor,
+ const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsConstantSupported(const TensorInfo& output,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 4d4f014671..f5798c886f 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -7,9 +7,9 @@
#include <armnn/TypesUtils.hpp>
#include <armnn/Types.hpp>
-#include <armnn/Descriptors.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
#include <LayerSupportCommon.hpp>
#include <backendsCommon/LayerSupportRules.hpp>
@@ -58,6 +58,488 @@ std::string CreateIncorrectDimensionsErrorMsg(unsigned int expected,
} // anonymous namespace
+bool RefLayerSupport::IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ switch (type)
+ {
+ case LayerType::Activation:
+ return IsActivationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Addition:
+ return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ArgMinMax:
+ return IsArgMinMaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchNormalization:
+ return IsBatchNormalizationSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::BatchToSpaceNd:
+ return IsBatchToSpaceNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Comparison:
+ return IsComparisonSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Concat:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < (infos.size() - 1); i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsConcatSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Constant:
+ return IsConstantSupported(infos[0], reasonIfUnsupported);
+ case LayerType::ConvertBf16ToFp32:
+ return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp16ToFp32:
+ return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp32ToBf16:
+ return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ConvertFp32ToFp16:
+ return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Convolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::DepthToSpace:
+ return IsDepthToSpaceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::DepthwiseConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsDepthwiseConvolutionSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Dequantize:
+ return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Division:
+ return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::ElementwiseUnary:
+ return IsElementwiseUnarySupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Fill:
+ return IsFillSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Floor:
+ return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::FullyConnected:
+ return IsFullyConnectedSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Gather:
+ return IsGatherSupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Input:
+ return IsInputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::InstanceNormalization:
+ return IsInstanceNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::L2Normalization:
+ return IsL2NormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogicalBinary:
+ return IsLogicalBinarySupported(infos[0],
+ infos[1],
+ infos[2],
+ *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::LogSoftmax:
+ return IsLogSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Lstm:
+ return IsLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::QLstm:
+ return IsQLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ case LayerType::Maximum:
+ return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Mean:
+ return IsMeanSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Minimum:
+ return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Multiplication:
+ return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Normalization:
+ return IsNormalizationSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Output:
+ return IsOutputSupported(infos[0], reasonIfUnsupported);
+ case LayerType::Pad:
+ return IsPadSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Permute:
+ return IsPermuteSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Pooling2d:
+ return IsPooling2dSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Prelu:
+ return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Quantize:
+ return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Reshape:
+ return IsReshapeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Resize:
+ return IsResizeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Reduce:
+ return IsReduceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Slice:
+ return IsSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Softmax:
+ return IsSoftmaxSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToBatchNd:
+ return IsSpaceToBatchNdSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::SpaceToDepth:
+ return IsSpaceToDepthSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Splitter:
+ {
+ std::vector<TensorInfo> outputInfos;
+ for (uint32_t i = 1; i < infos.size(); i++)
+ {
+ outputInfos.push_back(infos[i]);
+ }
+ return IsSplitterSupported(infos[0],
+ {outputInfos.begin(), outputInfos.end()},
+ *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::Stack:
+ {
+ std::vector<const TensorInfo*> inputInfos;
+ for (uint32_t i = 0; i < infos.size() - 1; i++)
+ {
+ inputInfos.push_back(&infos[i]);
+ }
+ return IsStackSupported(inputInfos,
+ infos[infos.size() - 1],
+ *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ }
+ case LayerType::StridedSlice:
+ return IsStridedSliceSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Subtraction:
+ return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::Transpose:
+ return IsTransposeSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::TransposeConvolution2d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsTransposeConvolution2dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Cast:
+ return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::ChannelShuffle:
+ return IsChannelShuffleSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Convolution3d:
+ {
+ if (infos.size() != 4)
+ {
+ throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
+ "TensorInfos should be of format: {input, output, weights, biases}.");
+ }
+
+ auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
+ if (infos[3] == TensorInfo())
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ EmptyOptional(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsConvolution3dSupported(infos[0],
+ infos[1],
+ desc,
+ infos[2],
+ infos[3],
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Debug:
+ return IsDebugSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::DetectionPostProcess:
+ return IsDetectionPostProcessSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ infos[6],
+ *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>
+ (&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::FakeQuantization:
+ return IsFakeQuantizationSupported(infos[0],
+ *(PolymorphicDowncast<const FakeQuantizationDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::MemCopy:
+ return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Rank:
+ return IsRankSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Shape:
+ return IsShapeSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::UnidirectionalSequenceLstm:
+ {
+ if (infos.size() != 6)
+ {
+ throw InvalidArgumentException("Invalid number of UnidirectionalSequenceLstm TensorInfos. TensorInfos "
+ "should be of format: {input, outputStateIn, cellStateIn, "
+ "hiddenStateOutputVal, cellStateOutputVal, output}");
+ }
+ auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
+
+ bool isHiddenStateOutputOptional = (infos[4] == TensorInfo());
+ bool isCellStateOutput = (infos[5] == TensorInfo());
+ if (isHiddenStateOutputOptional && isCellStateOutput)
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ EmptyOptional(),
+ EmptyOptional(),
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ else if (isHiddenStateOutputOptional)
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ EmptyOptional(),
+ infos[5],
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ else if (isCellStateOutput)
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ EmptyOptional(),
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ else
+ {
+ return IsUnidirectionalSequenceLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ infos[5],
+ desc,
+ lstmParamsInfo.value(),
+ reasonIfUnsupported);
+ }
+ }
+ case LayerType::Pooling3d:
+ return IsPooling3dSupported(infos[0],
+ infos[1],
+ *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
+ reasonIfUnsupported);
+ case LayerType::Map:
+ return true;
+ case LayerType::Unmap:
+ return true;
+ case LayerType::MemImport:
+ return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
+ case LayerType::Merge:
+ return LayerSupportBase::IsMergeSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
+ case LayerType::QuantizedLstm:
+ return LayerSupportBase::IsQuantizedLstmSupported(infos[0],
+ infos[1],
+ infos[2],
+ infos[3],
+ infos[4],
+ quantizedLstmInputParamsInfo.value(),
+ reasonIfUnsupported);
+ default:
+            // layers not supported in the reference backend by default:
+            // precompiled, standin, switch
+ return false;
+ }
+}
+
bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -391,7 +873,7 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const ConcatDescriptor& descriptor,
+ const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
IgnoreUnused(descriptor);
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 61d0556746..b787d25fbd 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -12,6 +12,13 @@ namespace armnn
class RefLayerSupport : public LayerSupportBase
{
public:
+ bool IsLayerSupported(const LayerType& type,
+ const std::vector<TensorInfo>& infos,
+ const BaseDescriptor& descriptor,
+ const Optional<LstmInputParamsInfo>& lstmParamsInfo,
+ const Optional<QuantizedLstmInputParamsInfo>&,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
@@ -58,7 +65,7 @@ public:
bool IsConcatSupported(const std::vector<const TensorInfo*> inputs,
const TensorInfo& output,
- const ConcatDescriptor& descriptor,
+ const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsConstantSupported(const TensorInfo& output,