author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2018-10-15 11:47:37 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>        2018-10-22 16:57:54 +0100
commit     bcf9f16605e8ce084a0c188abd16ee2bd56e59f4 (patch)
tree       8139fee67272f7ec8b68a018e9ed56204adb8d07
parent     7420e55aefe545452639992ab1972fd355a9ed30 (diff)
IVGCVSW-2002: Get rid of IsLayerSupportedCl functions in favor of ILayerSupport interface
Change-Id: Ic9172a5534eb243f3467996dd30c4400bc06224e
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp      766
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp      163
-rwxr-xr-x  src/backends/cl/test/ClLayerTests.cpp     3
3 files changed, 219 insertions, 713 deletions
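
For context, a minimal sketch of the caller-side change this commit makes, mirroring the updated Softmax4dSupport test at the bottom of this diff: the free armnn::IsSoftmaxSupportedCl function is removed and callers instead query a ClLayerSupport object, which implements the ILayerSupport interface. The wrapper function name and include lines below are illustrative assumptions, not part of the patch.

    // Sketch only: the helper name IsSoftmax4dSupportedOnCl and the include paths
    // are assumptions for illustration; the query pattern matches ClLayerTests.cpp below.
    #include "ClLayerSupport.hpp"        // lives at src/backends/cl/ClLayerSupport.hpp
    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>

    bool IsSoftmax4dSupportedOnCl()
    {
        const unsigned int dimensionSizes[] = { 2, 2, 2, 2 };
        const armnn::TensorInfo inputInfo(4, dimensionSizes, armnn::DataType::Float32);
        const armnn::TensorInfo outputInfo(4, dimensionSizes, armnn::DataType::Float32);

        // Before this commit (free function, now removed):
        //     return armnn::IsSoftmaxSupportedCl(inputInfo, outputInfo, armnn::SoftmaxDescriptor());

        // After this commit: go through the ILayerSupport implementation for the CL backend.
        armnn::ClLayerSupport layerSupport;
        return layerSupport.IsSoftmaxSupported(inputInfo, outputInfo, armnn::SoftmaxDescriptor());
    }
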
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 7c66348b98..6a49a80c7f 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -5,10 +5,9 @@
#include "ClLayerSupport.hpp"
-#include <InternalTypes.hpp>
-#include <LayerSupportCommon.hpp>
-
#include <armnn/Descriptors.hpp>
+#include <armnn/InternalTypes.hpp>
+#include <armnn/LayerSupportCommon.hpp>
#include <boost/core/ignore_unused.hpp>
@@ -38,304 +37,9 @@ using namespace boost;
namespace armnn
{
-bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsActivationSupportedCl(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsAdditionSupportedCl(input0, input1, output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsBatchNormalizationSupportedCl(input,
- output,
- mean,
- var,
- beta,
- gamma,
- descriptor,
- reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsConstantSupportedCl(output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsConvertFp16ToFp32SupportedCl(input, output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsConvertFp32ToFp16SupportedCl(input, output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsConvolution2dSupportedCl(input,
- output,
- descriptor,
- weights,
- biases,
- reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsDepthwiseConvolutionSupportedCl(input,
- output,
- descriptor,
- weights,
- biases,
- reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsDivisionSupportedCl(input0, input1, output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsFakeQuantizationSupportedCl(input, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsFloorSupportedCl(input, output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsFullyConnectedSupportedCl(input,
- output,
- weights,
- biases,
- descriptor,
- reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsInputSupportedCl(input, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const L2NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsL2NormalizationSupportedCl(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
- const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn,
- const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias,
- const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsLstmSupportedCl(input,
- outputStateIn,
- cellStateIn,
- scratchBuffer,
- outputStateOut,
- cellStateOut,
- output,
- descriptor,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- forgetGateBias,
- cellBias,
- outputGateBias,
- inputToInputWeights,
- recurrentToInputWeights,
- cellToInputWeights,
- inputGateBias,
- projectionWeights,
- projectionBias,
- cellToForgetWeights,
- cellToOutputWeights,
- reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsMeanSupportedCl(input, output, descriptor,reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsMergerSupportedCl(inputs, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsMultiplicationSupportedCl(input0, input1, output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsNormalizationSupportedCl(input,
- output,
- descriptor,
- reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsOutputSupportedCl(output, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsPadSupportedCl(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PermuteDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsPermuteSupportedCl(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsPooling2dSupportedCl(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsReshapeSupportedCl(input, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsResizeBilinearSupportedCl(input, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsSoftmaxSupportedCl(input, output, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsSplitterSupportedCl(input, descriptor, reasonIfUnsupported);
-}
-
-bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
-{
- return armnn::IsSubtractionSupportedCl(input0, input1, output, reasonIfUnsupported);
-}
-
-//
-// Implementation functions
-//
-// TODO: Functions kept for backward compatibility. Remove redundant functions
-// once transition to plugable backends is complete.
-
namespace
{
+
template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
@@ -353,7 +57,7 @@ template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
-};
+}
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
@@ -394,8 +98,6 @@ inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIf
return IsClBackendSupported(reasonIfUnsupported);
#endif
-} //namespace
-
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
DataType dataType,
@@ -412,10 +114,12 @@ bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
std::forward<Params>(params)...);
}
-bool IsActivationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+} // anonymous namespace
+
+bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ActivationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
reasonIfUnsupported,
@@ -424,10 +128,10 @@ bool IsActivationSupportedCl(const TensorInfo& input,
descriptor);
}
-bool IsAdditionSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
reasonIfUnsupported,
@@ -436,14 +140,14 @@ bool IsAdditionSupportedCl(const TensorInfo& input0,
output);
}
-bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& mean,
+ const TensorInfo& var,
+ const TensorInfo& beta,
+ const TensorInfo& gamma,
+ const BatchNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
reasonIfUnsupported,
@@ -456,8 +160,8 @@ bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
descriptor);
}
-bool IsConstantSupportedCl(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
output.GetDataType(),
@@ -465,53 +169,32 @@ bool IsConstantSupportedCl(const TensorInfo& output,
&FalseFuncU8<>);
}
-bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
+bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
- bool isSupported = false;
-
- bool strideXIsOneOrTwo = IsMatchingStride<1, 2>(desc.m_StrideX);
- bool strideXIsThree = IsMatchingStride<3>(desc.m_StrideX);
-
- bool strideYIsOneOrTwo = IsMatchingStride<1, 2>(desc.m_StrideY);
- bool strideYIsThree = IsMatchingStride<3>(desc.m_StrideY);
-
- bool strideIsOneOrTwo = strideXIsOneOrTwo && strideYIsOneOrTwo;
- bool strideIsOneOrTwoOrThree = ( strideXIsOneOrTwo || strideXIsThree ) && ( strideYIsOneOrTwo || strideYIsThree );
-
- // 1x1 convolution with strides of 1,2,3.
- isSupported |= IsMatchingSize2d<1>(weightInfo) && ( strideIsOneOrTwoOrThree );
-
- // 3x3 convolution with strides of 1,2.
- isSupported |= IsMatchingSize2d<3>(weightInfo) && ( strideIsOneOrTwo );
-
- // 5x5 convolution with strides of 1,2
- isSupported |= IsMatchingSize2d<5>(weightInfo) && ( strideIsOneOrTwo );
-
- //Fall back to normal convolution for the asymmetric padding case.
- if (desc.m_PadLeft != desc.m_PadRight ||
- desc.m_PadTop != desc.m_PadBottom)
- {
- //Direct convolution does not support asymmetric padding yet.
- isSupported = false;
- }
-
- return isSupported;
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
}
-bool IsDirectConvolution2dParamsSupportedCl(Optional<std::string&> reasonIfUnsupported,
- const Convolution2dDescriptor& parameters,
- const TensorInfo& weightInfo)
+bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(reasonIfUnsupported);
- return IsClDirectConvolution2dSupported(weightInfo, parameters);
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
}
-bool IsConvolution2dSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Convolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
reasonIfUnsupported,
@@ -522,12 +205,12 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
biases);
}
-bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const DepthwiseConvolution2dDescriptor& descriptor,
+ const TensorInfo& weights,
+ const Optional<TensorInfo>& biases,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
reasonIfUnsupported,
@@ -538,10 +221,10 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
biases);
}
-bool IsDivisionSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
reasonIfUnsupported,
@@ -550,25 +233,35 @@ bool IsDivisionSupportedCl(const TensorInfo& input0,
output);
}
-bool IsSubtractionSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
+ const FakeQuantizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
+ ignore_unused(input);
+ ignore_unused(descriptor);
+ ignore_unused(reasonIfUnsupported);
+ return false;
+}
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
- reasonIfUnsupported,
- input0,
- input1,
- output);
+bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(output);
+ return IsClBackendSupported(reasonIfUnsupported) &&
+ IsSupportedForDataTypeGeneric(reasonIfUnsupported,
+ input.GetDataType(),
+ &FalseFuncF16<>,
+ &TrueFunc<>,
+ &FalseFuncU8<>);
}
-bool IsFullyConnectedSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const TensorInfo& weights,
+ const TensorInfo& biases,
+ const FullyConnectedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
reasonIfUnsupported,
@@ -579,8 +272,8 @@ bool IsFullyConnectedSupportedCl(const TensorInfo& input,
descriptor);
}
-bool IsInputSupportedCl(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
input.GetDataType(),
@@ -588,17 +281,89 @@ bool IsInputSupportedCl(const TensorInfo& input,
&TrueFunc<>);
}
-bool IsL2NormalizationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const L2NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const L2NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
+bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
+ const TensorInfo& outputStateIn,
+ const TensorInfo& cellStateIn,
+ const TensorInfo& scratchBuffer,
+ const TensorInfo& outputStateOut,
+ const TensorInfo& cellStateOut,
+ const TensorInfo& output,
+ const LstmDescriptor& descriptor,
+ const TensorInfo& inputToForgetWeights,
+ const TensorInfo& inputToCellWeights,
+ const TensorInfo& inputToOutputWeights,
+ const TensorInfo& recurrentToForgetWeights,
+ const TensorInfo& recurrentToCellWeights,
+ const TensorInfo& recurrentToOutputWeights,
+ const TensorInfo& forgetGateBias,
+ const TensorInfo& cellBias,
+ const TensorInfo& outputGateBias,
+ const TensorInfo* inputToInputWeights,
+ const TensorInfo* recurrentToInputWeights,
+ const TensorInfo* cellToInputWeights,
+ const TensorInfo* inputGateBias,
+ const TensorInfo* projectionWeights,
+ const TensorInfo* projectionBias,
+ const TensorInfo* cellToForgetWeights,
+ const TensorInfo* cellToOutputWeights,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ outputStateIn,
+ cellStateIn,
+ scratchBuffer,
+ outputStateOut,
+ cellStateOut,
+ output,
+ descriptor,
+ inputToForgetWeights,
+ inputToCellWeights,
+ inputToOutputWeights,
+ recurrentToForgetWeights,
+ recurrentToCellWeights,
+ recurrentToOutputWeights,
+ forgetGateBias,
+ cellBias,
+ outputGateBias,
+ inputToInputWeights,
+ recurrentToInputWeights,
+ cellToInputWeights,
+ inputGateBias,
+ projectionWeights,
+ projectionBias,
+ cellToForgetWeights,
+ cellToOutputWeights);
+}
+
+bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const MeanDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
+ ignore_unused(input);
+ ignore_unused(output);
+ ignore_unused(descriptor);
+ ignore_unused(reasonIfUnsupported);
+ return false;
}
-bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+ const OriginsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(descriptor);
return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -607,10 +372,10 @@ bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
&FalseFuncU8<>);
}
-bool IsMultiplicationSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
reasonIfUnsupported,
@@ -619,16 +384,16 @@ bool IsMultiplicationSupportedCl(const TensorInfo& input0,
output);
}
-bool IsNormalizationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const NormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
-bool IsOutputSupportedCl(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
output.GetDataType(),
@@ -636,26 +401,46 @@ bool IsOutputSupportedCl(const TensorInfo& output,
&TrueFunc<>);
}
-bool IsPermuteSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const PermuteDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
+bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PermuteDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(input);
ignore_unused(output);
FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor);
}
-bool IsPooling2dSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling2dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
-bool IsResizeBilinearSupportedCl(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(input);
+ ignore_unused(reasonIfUnsupported);
+ return true;
+}
+
+bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+ Optional<std::string&> reasonIfUnsupported) const
{
return IsSupportedForDataTypeCl(reasonIfUnsupported,
input.GetDataType(),
@@ -663,18 +448,18 @@ bool IsResizeBilinearSupportedCl(const TensorInfo& input,
&FalseFuncU8<>);
}
-bool IsSoftmaxSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SoftmaxDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
}
-bool IsSplitterSupportedCl(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
+ const ViewsDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(descriptor);
return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -683,135 +468,16 @@ bool IsSplitterSupportedCl(const TensorInfo& input,
&TrueFunc<>);
}
-bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
-{
- ignore_unused(input);
- ignore_unused(descriptor);
- ignore_unused(reasonIfUnsupported);
- return false;
-}
-
-bool IsReshapeSupportedCl(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported)
-{
- ignore_unused(input);
- ignore_unused(reasonIfUnsupported);
- return true;
-}
-
-bool IsFloorSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- ignore_unused(output);
- return IsClBackendSupported(reasonIfUnsupported) &&
- IsSupportedForDataTypeGeneric(reasonIfUnsupported,
- input.GetDataType(),
- &FalseFuncF16<>,
- &TrueFunc<>,
- &FalseFuncU8<>);
-}
-
-bool IsLstmSupportedCl(const TensorInfo& input,
- const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn,
- const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias,
- const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights,
- Optional<std::string&> reasonIfUnsupported)
-{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
- reasonIfUnsupported,
- input,
- outputStateIn,
- cellStateIn,
- scratchBuffer,
- outputStateOut,
- cellStateOut,
- output,
- descriptor,
- inputToForgetWeights,
- inputToCellWeights,
- inputToOutputWeights,
- recurrentToForgetWeights,
- recurrentToCellWeights,
- recurrentToOutputWeights,
- forgetGateBias,
- cellBias,
- outputGateBias,
- inputToInputWeights,
- recurrentToInputWeights,
- cellToInputWeights,
- inputGateBias,
- projectionWeights,
- projectionBias,
- cellToForgetWeights,
- cellToOutputWeights);
-}
-
-bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
-{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
- reasonIfUnsupported,
- input,
- output);
-}
-
-bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported)
+bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
reasonIfUnsupported,
- input,
+ input0,
+ input1,
output);
}
-bool IsMeanSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
-{
- ignore_unused(input);
- ignore_unused(output);
- ignore_unused(descriptor);
- ignore_unused(reasonIfUnsupported);
- return false;
-}
-
-bool IsPadSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported)
-{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
- reasonIfUnsupported,
- input,
- output,
- descriptor);
-}
-
-}
+} // namespace armnn
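
In the rewritten ClLayerSupport.cpp above, most methods delegate to the Arm Compute Library validators through the FORWARD_WORKLOAD_VALIDATE_FUNC macro and the IsWorkloadSupported helper that appears as context in the hunk around the anonymous namespace. Their full definitions are outside this diff; the following is only a plausible sketch of that forwarding pattern, assuming the validators return an arm_compute::Status. The macro name and helper name are real, but the bodies shown are assumptions.

    // Plausible sketch of the forwarding helpers referenced above; not taken from this patch.
    #include <arm_compute/core/Error.h>   // arm_compute::Status, arm_compute::ErrorCode
    #include <armnn/Optional.hpp>
    #include <string>
    #include <utility>

    template<typename FuncType, typename... Args>
    inline bool IsWorkloadSupported(FuncType&& func,
                                    armnn::Optional<std::string&> reasonIfUnsupported,
                                    Args&&... args)
    {
        // Run the Compute Library validator and translate its status into a bool,
        // copying the error description out when a reason string was requested.
        arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
        const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
        if (!supported && reasonIfUnsupported)
        {
            reasonIfUnsupported.value() = aclStatus.error_description();
        }
        return supported;
    }

    #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
        return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
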
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 2d57d10040..6bdeb5a6f6 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -169,165 +169,4 @@ public:
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
};
-bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
-bool IsClDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
- const DepthwiseConvolution2dDescriptor& parameters,
- const TensorInfo& weights);
-
-bool IsActivationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsAdditionSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConstantSupportedCl(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvolution2dSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsDivisionSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSubtractionSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFullyConnectedSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsInputSupportedCl(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsL2NormalizationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const L2NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsLstmSupportedCl(const TensorInfo& input,
- const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn,
- const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const LstmDescriptor& descriptor,
- const TensorInfo& inputToForgetWeights,
- const TensorInfo& inputToCellWeights,
- const TensorInfo& inputToOutputWeights,
- const TensorInfo& recurrentToForgetWeights,
- const TensorInfo& recurrentToCellWeights,
- const TensorInfo& recurrentToOutputWeights,
- const TensorInfo& forgetGateBias,
- const TensorInfo& cellBias,
- const TensorInfo& outputGateBias,
- const TensorInfo* inputToInputWeights,
- const TensorInfo* recurrentToInputWeights,
- const TensorInfo* cellToInputWeights,
- const TensorInfo* inputGateBias,
- const TensorInfo* projectionWeights,
- const TensorInfo* projectionBias,
- const TensorInfo* cellToForgetWeights,
- const TensorInfo* cellToOutputWeights,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
- const OriginsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMultiplicationSupportedCl(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsNormalizationSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsOutputSupportedCl(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPermuteSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const PermuteDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPooling2dSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsResizeBilinearSupportedCl(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSoftmaxSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSplitterSupportedCl(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsReshapeSupportedCl(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFloorSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMeanSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPadSupportedCl(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-}
+} // namespace armnn
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 0f8b75f50e..62ce2cb18f 100755
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -84,7 +84,8 @@ BOOST_AUTO_TEST_CASE(Softmax4dSupport)
const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32);
// 4D Softmax should be reported as unsupported on the CL backend
- BOOST_TEST(!armnn::IsSoftmaxSupportedCl(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
+ armnn::ClLayerSupport layerSupport;
+ BOOST_TEST(!layerSupport.IsSoftmaxSupported(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
}
// Splitter