author     Francis Murtagh <francis.murtagh@arm.com>   2022-02-09 14:36:04 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>   2022-02-09 16:11:31 +0000
commit     b28e525233d43b2aaea4da56acdbe9914cb41b5b (patch)
tree       9efa418aadf5f075f03e619ffd1817d26b100738
parent     d0b4aa97d5389adc6a4c87f5704503b8bb22441e (diff)
download   armnn-b28e525233d43b2aaea4da56acdbe9914cb41b5b.tar.gz
IVGCVSW-6399 Remove deprecated code 22.02
* Remove LayerSupport.hpp, which was replaced by the ILayerSupport interface and the
  BackendHelper.hpp GetILayerSupportByBackendId() function.
* Fix a bug in the backend helper where the value of an Optional was passed on even
  when the Optional had no value.

Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Change-Id: I03f1f693abe927a14c1942ef7e21edccc8357b35
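For code that previously used the free functions from the removed LayerSupport.hpp, the replacement path is to query a LayerSupportHandle obtained from GetILayerSupportByBackendId(), as the updated tests in this change do. A minimal sketch, assuming a registered "CpuRef" backend; the wrapper function name and the tensor/descriptor arguments are purely illustrative:

#include <armnn/BackendHelper.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <string>

// Hypothetical helper showing the post-22.02 support query path.
bool CheckNormalizationSupport(const armnn::TensorInfo& input,
                               const armnn::TensorInfo& output,
                               const armnn::NormalizationDescriptor& descriptor,
                               std::string& reason)
{
    // Replaces the removed armnn::IsNormalizationSupported(backend, ...) free function.
    armnn::LayerSupportHandle handle =
        armnn::GetILayerSupportByBackendId(armnn::BackendId("CpuRef"));
    return handle.IsNormalizationSupported(input, output, descriptor,
                                           armnn::Optional<std::string&>(reason));
}

The reason string is optional; the test hunks below simply omit it and rely on the default EmptyOptional().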
-rw-r--r--   Android.mk                                                                1
-rw-r--r--   CMakeLists.txt                                                            2
-rw-r--r--   src/armnn/BackendHelper.cpp                                             165
-rw-r--r--   src/armnn/LayerSupport.cpp                                              945
-rw-r--r--   src/backends/backendsCommon/WorkloadFactory.cpp                           1
-rw-r--r--   src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp    10
-rw-r--r--   src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp        23
7 files changed, 102 insertions, 1045 deletions
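The Optional fix called out in the commit message appears in two forms in BackendHelper.cpp below: the reasonIfUnsupported parameter is now forwarded as an Optional rather than dereferenced with .value(), and direct assignments to it are guarded with has_value(). A minimal sketch of that guarded pattern, with an illustrative function name:

#include <armnn/Optional.hpp>
#include <string>

// Write a failure reason only if the caller actually supplied somewhere to put it.
void SetReasonIfRequested(armnn::Optional<std::string&> reasonIfUnsupported,
                          const char* message)
{
    if (reasonIfUnsupported.has_value())
    {
        reasonIfUnsupported.value() = message;
    }
    // Calling reasonIfUnsupported.value() on an EmptyOptional() is invalid,
    // which is the failure mode this commit removes from LayerSupportHandle.
}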
diff --git a/Android.mk b/Android.mk
index 3b1e6da44f..b7aec59119 100644
--- a/Android.mk
+++ b/Android.mk
@@ -126,7 +126,6 @@ LOCAL_SRC_FILES := \
src/armnn/InternalTypes.cpp \
src/armnn/JsonPrinter.cpp \
src/armnn/Layer.cpp \
- src/armnn/LayerSupport.cpp \
src/armnn/LoadedNetwork.cpp \
src/armnn/Logging.cpp \
src/armnn/Network.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 075790a049..3ba5985efd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -162,7 +162,6 @@ list(APPEND armnn_sources
include/armnn/IRuntime.hpp
include/armnn/IStrategy.hpp
include/armnn/IWorkingMemHandle.hpp
- include/armnn/LayerSupport.hpp
include/armnn/LayerVisitorBase.hpp
include/armnn/Logging.hpp
include/armnn/LstmParams.hpp
@@ -360,7 +359,6 @@ list(APPEND armnn_sources
src/armnn/Layer.hpp
src/armnn/LayersFwd.hpp
src/armnn/LayerSupportCommon.hpp
- src/armnn/LayerSupport.cpp
src/armnn/LoadedNetwork.cpp
src/armnn/LoadedNetwork.hpp
src/armnn/Logging.cpp
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 23c4003107..e5c975994e 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -162,7 +162,7 @@ bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
@@ -177,7 +177,7 @@ bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
@@ -192,7 +192,7 @@ bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -211,7 +211,7 @@ bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
@@ -226,7 +226,7 @@ bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
@@ -240,7 +240,7 @@ bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo &input,
@@ -255,7 +255,7 @@ bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo &input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
@@ -271,7 +271,7 @@ bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
@@ -291,7 +291,7 @@ bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*>
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
@@ -304,7 +304,7 @@ bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
@@ -318,7 +318,7 @@ bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
@@ -332,7 +332,7 @@ bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
@@ -346,7 +346,7 @@ bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
@@ -360,7 +360,7 @@ bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
@@ -378,7 +378,7 @@ bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
@@ -396,7 +396,7 @@ bool LayerSupportHandle::IsConvolution3dSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
@@ -410,7 +410,7 @@ bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
@@ -425,7 +425,7 @@ bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
@@ -444,7 +444,7 @@ bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
@@ -458,7 +458,7 @@ bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
@@ -478,7 +478,7 @@ bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEn
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
@@ -497,7 +497,7 @@ bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
@@ -512,7 +512,7 @@ bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
@@ -527,7 +527,7 @@ bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
@@ -541,7 +541,7 @@ bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
@@ -556,7 +556,7 @@ bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
@@ -570,7 +570,7 @@ bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
@@ -587,18 +587,25 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
{
if(!weights.IsConstant())
{
- reasonIfUnsupported.value() =
- "This backend might not support non constant weights. "
- "If weights are constant make sure to set IsConstant when creating TensorInfo";
+ if (reasonIfUnsupported.has_value())
+ {
+ reasonIfUnsupported.value() =
+ "This backend might not support non constant weights. "
+ "If weights are constant make sure to set IsConstant when creating TensorInfo";
+ }
+
return false;
}
if(descriptor.m_BiasEnabled)
{
if(!biases.IsConstant())
{
- reasonIfUnsupported.value() =
- "This backend might not support non constant bias. "
- "If bias are constant make sure to set IsConstant when creating TensorInfo";
+ if (reasonIfUnsupported.has_value())
+ {
+ reasonIfUnsupported.value() =
+ "This backend might not support non constant weights. "
+ "If weights are constant make sure to set IsConstant when creating TensorInfo";
+ }
return false;
}
}
@@ -613,7 +620,7 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
if(!descriptor.m_ConstantWeights)
{
- auto capability = GetCapability("NonConstWeights", m_BackendId);
+ capability = GetCapability("NonConstWeights", m_BackendId);
if (capability.has_value() && capability.value().GetValue().AsBool() == true)
{
return true;
@@ -629,7 +636,7 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
@@ -645,7 +652,7 @@ bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
@@ -658,7 +665,7 @@ bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsInstanceNormalizationSupported(
@@ -674,7 +681,7 @@ bool LayerSupportHandle::IsInstanceNormalizationSupported(
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
@@ -689,7 +696,7 @@ bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
@@ -705,7 +712,7 @@ bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
@@ -720,7 +727,7 @@ bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
@@ -735,7 +742,7 @@ bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
@@ -756,7 +763,7 @@ bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
descriptor,
paramsInfo,
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
@@ -771,7 +778,7 @@ bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
@@ -786,7 +793,7 @@ bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
@@ -800,7 +807,7 @@ bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
@@ -814,7 +821,7 @@ bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
@@ -829,7 +836,7 @@ bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
@@ -844,7 +851,7 @@ bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
@@ -859,7 +866,7 @@ bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
@@ -874,7 +881,7 @@ bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
@@ -887,7 +894,7 @@ bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
@@ -902,7 +909,7 @@ bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
@@ -917,7 +924,7 @@ bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
@@ -932,7 +939,7 @@ bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
@@ -947,7 +954,7 @@ bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
@@ -961,7 +968,7 @@ bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
@@ -976,7 +983,7 @@ bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
@@ -990,7 +997,7 @@ bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
@@ -1010,7 +1017,7 @@ bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
descriptor,
paramsInfo,
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
@@ -1028,7 +1035,7 @@ bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
paramsInfo,
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
@@ -1042,7 +1049,7 @@ bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
@@ -1057,7 +1064,7 @@ bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
@@ -1072,7 +1079,7 @@ bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
@@ -1087,7 +1094,7 @@ bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
@@ -1101,7 +1108,7 @@ bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
@@ -1116,7 +1123,7 @@ bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
@@ -1131,7 +1138,7 @@ bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
@@ -1146,7 +1153,7 @@ bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
@@ -1161,7 +1168,7 @@ bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
@@ -1180,7 +1187,7 @@ bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
@@ -1200,7 +1207,7 @@ bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>&
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
@@ -1223,7 +1230,7 @@ bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
@@ -1239,7 +1246,7 @@ bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
@@ -1254,7 +1261,7 @@ bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
@@ -1270,7 +1277,7 @@ bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
BaseDescriptor(),
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsTransposeConvolution2dSupported(
@@ -1289,7 +1296,7 @@ bool LayerSupportHandle::IsTransposeConvolution2dSupported(
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
@@ -1304,7 +1311,7 @@ bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
descriptor,
EmptyOptional(),
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
@@ -1326,7 +1333,7 @@ bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo&
descriptor,
paramsInfo,
EmptyOptional(),
- reasonIfUnsupported.value());
+ reasonIfUnsupported);
}
}
\ No newline at end of file
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
deleted file mode 100644
index b03f59ea26..0000000000
--- a/src/armnn/LayerSupport.cpp
+++ /dev/null
@@ -1,945 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <armnn/LayerSupport.hpp>
-#include <armnn/Optional.hpp>
-#include <armnn/backends/ILayerSupport.hpp>
-#include <armnn/BackendRegistry.hpp>
-
-#include <armnn/backends/IBackendInternal.hpp>
-
-#include <armnn/utility/Assert.hpp>
-
-#include <cstring>
-#include <algorithm>
-#include <unordered_map>
-
-namespace
-{
-
-/// Helper function to copy a full string to a truncated version.
-void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
-{
- if(truncatedString != nullptr)
- {
- std::snprintf(truncatedString, maxLength, "%s", fullString);
- }
-}
-
-} // anonymous namespace
-
-namespace armnn
-{
-
-// Helper macro to avoid code duplication.
-// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of backendId.
-#define FORWARD_LAYER_SUPPORT_FUNC(backendId, func, ...) \
- std::string reasonIfUnsupportedFull; \
- bool isSupported; \
- try { \
- auto const& backendRegistry = BackendRegistryInstance(); \
- if (!backendRegistry.IsBackendRegistered(backendId)) \
- { \
- std::stringstream ss; \
- ss << __func__ << " is not supported on " << backendId << " because this backend is not registered."; \
- reasonIfUnsupportedFull = ss.str(); \
- isSupported = false; \
- } \
- else \
- { \
- auto factoryFunc = backendRegistry.GetFactory(backendId); \
- auto backendObject = factoryFunc(); \
- auto layerSupportObject = backendObject->GetLayerSupport(); \
- isSupported = layerSupportObject->func(__VA_ARGS__, Optional<std::string&>(reasonIfUnsupportedFull)); \
- CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
- } \
- } catch (const InvalidArgumentException &e) { \
- /* re-throwing with more context information */ \
- throw InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
- } \
- return isSupported;
-
-bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
-{
- return input0.GetDataType() == input1.GetDataType();
-}
-
-using TensorInfos = std::vector<TensorInfo>;
-
-bool IsActivationSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Activation,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsAdditionSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- if(!CheckTensorDataTypesEqual(input0, input1))
- {
- return false;
- }
-
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Addition,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsArgMinMaxSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const ArgMinMaxDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::ArgMinMax,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsBatchNormalizationSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output, mean, var, beta, gamma};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::BatchNormalization,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsBatchToSpaceNdSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const BatchToSpaceNdDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::BatchToSpaceNd,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsConcatSupported(const BackendId& backend,
- std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- ARMNN_ASSERT(inputs.size() > 0);
-
- TensorInfos infos;
- for (const TensorInfo* inputInfo : inputs)
- {
- infos.push_back(*inputInfo);
- }
- infos.push_back(output);
-
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Concat,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsConstantSupported(const BackendId& backend,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Constant,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsConvertFp16ToFp32Supported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::ConvertFp16ToFp32,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsConvertFp32ToFp16Supported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::ConvertFp32ToFp16,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsConvolution2dSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output, weights, biases.value()};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Convolution2d,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsDebugSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Debug,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsDepthwiseConvolutionSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output, weights, biases.value()};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::DepthwiseConvolution2d,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsDequantizeSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Dequantize,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsDetectionPostProcessSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const DetectionPostProcessDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength);
-
-bool IsDivisionSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Division,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsEqualSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Comparison,
- infos,
- ComparisonDescriptor(ComparisonOperation::Equal),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsFakeQuantizationSupported(const BackendId& backend,
- const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::FakeQuantization,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsFloorSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- // By definition (that is, regardless of compute device), shapes and data type must match.
- if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
- {
- return false;
- }
-
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Floor,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsFullyConnectedSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output, weights, biases};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::FullyConnected,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsGatherSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- const GatherDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Gather,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsGreaterSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Comparison,
- infos,
- ComparisonDescriptor(ComparisonOperation::Greater),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsInputSupported(const BackendId& backend,
- const TensorInfo& input,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Input,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-
-bool IsL2NormalizationSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const L2NormalizationDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::L2Normalization,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsLstmSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
- const TensorInfo& output, const LstmDescriptor& descriptor,
- const LstmInputParamsInfo& paramsInfo, char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-
-{
- TensorInfos infos{input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Lstm,
- infos,
- descriptor,
- paramsInfo,
- EmptyOptional());
-}
-
-bool IsMaximumSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Maximum,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsMeanSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Mean,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsMemCopySupported(const BackendId &backend,
- const TensorInfo &input,
- const TensorInfo &output,
- char *reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::MemCopy,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsMemImportSupported(const BackendId &backend,
- const TensorInfo &input,
- const TensorInfo &output,
- char *reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::MemImport,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsMergeSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Merge,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsMinimumSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Minimum,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsMultiplicationSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Multiplication,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsNormalizationSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Normalization,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsOutputSupported(const BackendId& backend,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Output,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());;
-}
-
-bool IsPadSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Pad,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsQuantizeSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Quantize,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsQLstmSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& previousOutputIn,
- const TensorInfo& previousCellStateIn,
- const TensorInfo& outputStateOut,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const QLstmDescriptor& descriptor,
- const LstmInputParamsInfo& paramsInfo,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-
-{
- TensorInfos infos{input, previousOutputIn, previousCellStateIn, outputStateOut, cellStateOut, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::QLstm,
- infos,
- descriptor,
- paramsInfo,
- EmptyOptional());
-}
-
-bool IsQuantizedLstmSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& previousCellStateIn,
- const TensorInfo& previousOutputIn,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const QuantizedLstmInputParamsInfo& paramsInfo,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-
-{
- TensorInfos infos{input, previousCellStateIn, previousOutputIn, cellStateOut, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::QuantizedLstm,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- paramsInfo);
-}
-
-
-bool IsPermuteSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const PermuteDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Permute,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsPooling2dSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Pooling2d,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsPreluSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& alpha,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, alpha, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Prelu,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsReduceSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const ReduceDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Reduce,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsReshapeSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const ReshapeDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Reshape,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsResizeSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const ResizeDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Resize,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsSoftmaxSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Softmax,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsSpaceToBatchNdSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const SpaceToBatchNdDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::SpaceToBatchNd,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsSpaceToDepthSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const SpaceToDepthDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::SpaceToDepth,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsSplitterSupported(const BackendId& backend,
- const TensorInfo& input,
- const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
- const ViewsDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input};
- for (TensorInfo outInfo : outputs)
- {
- infos.push_back(outInfo);
- }
-
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Splitter,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsStridedSliceSupported(const BackendId& backend,
- const TensorInfo& input,
- const TensorInfo& output,
- const StridedSliceDescriptor& descriptor,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::StridedSlice,
- infos,
- descriptor,
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsSubtractionSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Subtraction,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-bool IsSwitchSupported(const BackendId& backend,
- const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output0,
- const TensorInfo& output1,
- char* reasonIfUnsupported,
- size_t reasonIfUnsupportedMaxLength)
-{
- TensorInfos infos{input0, input1, output0, output1};
- FORWARD_LAYER_SUPPORT_FUNC(backend,
- IsLayerSupported,
- LayerType::Switch,
- infos,
- BaseDescriptor(),
- EmptyOptional(),
- EmptyOptional());
-}
-
-} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 56874a6a8c..090e2856d8 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -7,7 +7,6 @@
#include <LayersFwd.hpp>
#include <armnn/Types.hpp>
-#include <armnn/LayerSupport.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index 9c21c7d01f..6ba3e6b94a 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -6,11 +6,12 @@
#include "NormalizationTestImpl.hpp"
#include <armnn/Exceptions.hpp>
-#include <armnn/LayerSupport.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/backends/TensorHandle.hpp>
+#include <armnn/backends/ILayerSupport.hpp>
+#include <armnn/BackendHelper.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>
@@ -350,10 +351,9 @@ LayerTestResult<float,4> CompareNormalizationTestImpl(
// Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
armnn::BackendId backend = workloadFactory.GetBackendId();
- const size_t reasonIfUnsupportedMaxLen = 255;
- char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- ret.m_Supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
- reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+ auto handle = armnn::GetILayerSupportByBackendId(backend);
+ ret.m_Supported = handle.IsNormalizationSupported(inputTensorInfo, outputTensorInfo, data.m_Parameters);
+
if (!ret.m_Supported)
{
return ret;
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index 2d2a5922b5..11605f0b28 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -8,8 +8,6 @@
#include <armnnUtils/QuantizeHelper.hpp>
#include <ResolveType.hpp>
-#include <armnn/LayerSupport.hpp>
-
#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
@@ -17,6 +15,7 @@
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
+#include <armnn/BackendHelper.hpp>
#include <backendsCommon/WorkloadInfo.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
@@ -90,11 +89,11 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
// Don't execute if Pooling is not supported, as an exception will be raised.
armnn::BackendId backend = workloadFactory.GetBackendId();
- const size_t reasonIfUnsupportedMaxLen = 255;
- char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- result.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
- queueDescriptor.m_Parameters,
- reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+
+ auto handle = armnn::GetILayerSupportByBackendId(backend);
+ result.m_Supported = handle.IsPooling2dSupported(inputTensorInfo,
+ outputTensorInfo,
+ queueDescriptor.m_Parameters);
if (!result.m_Supported)
{
return result;
@@ -817,11 +816,11 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
// Don't execute if Pooling is not supported, as an exception will be raised.
armnn::BackendId backend = workloadFactory.GetBackendId();
- const size_t reasonIfUnsupportedMaxLen = 255;
- char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- comparisonResult.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
- data.m_Parameters,
- reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+
+ auto handle = armnn::GetILayerSupportByBackendId(backend);
+ comparisonResult.m_Supported = handle.IsPooling2dSupported(inputTensorInfo,
+ outputTensorInfo,
+ data.m_Parameters);
if (!comparisonResult.m_Supported)
{
return comparisonResult;