From bcf9f16605e8ce084a0c188abd16ee2bd56e59f4 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Mon, 15 Oct 2018 11:47:37 +0100
Subject: IVGCVSW-2002: Get rid of IsLayerSupportedCl functions in favor of
 ILayerSupport interface

Change-Id: Ic9172a5534eb243f3467996dd30c4400bc06224e
---
 src/backends/cl/ClLayerSupport.cpp    | 802 ++++++++++------------------------
 src/backends/cl/ClLayerSupport.hpp    | 163 +------
 src/backends/cl/test/ClLayerTests.cpp |   3 +-
 3 files changed, 237 insertions(+), 731 deletions(-)

diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 7c66348b98..6a49a80c7f 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -5,10 +5,9 @@
 #include "ClLayerSupport.hpp"
 
-#include
-#include
-
 #include
+#include
+#include
 
 #include
 
@@ -38,12 +37,95 @@ using namespace boost;
 namespace armnn
 {
 
+namespace
+{
+
+template<unsigned int FilterSize>
+bool IsMatchingSize2d(const TensorInfo& weightInfo)
+{
+    // Width & Height must match.
+    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
+}
+
+template<uint32_t ValidStride>
+bool IsMatchingStride(uint32_t actualStride)
+{
+    return ValidStride == actualStride;
+}
+
+template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
+bool IsMatchingStride(uint32_t actualStride)
+{
+    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
+}
+
+bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
+{
+#if ARMCOMPUTECL_ENABLED
+    return true;
+#else
+    if (reasonIfUnsupported)
+    {
+        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
+    }
+    return false;
+#endif
+}
+
+#if ARMCOMPUTECL_ENABLED
+#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
+#else
+#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
+#endif
+
+#if ARMCOMPUTECL_ENABLED
+template<class FuncType, class... Args>
+inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
+{
+    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
+    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
+    if (!supported && reasonIfUnsupported)
+    {
+        reasonIfUnsupported.value() = aclStatus.error_description();
+    }
+    return supported;
+}
+
+#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
+    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
+#else
+#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
+    return IsClBackendSupported(reasonIfUnsupported);
+#endif
+
+template<typename FloatFunc, typename Uint8Func, typename ... Params>
+bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
+                              DataType dataType,
+                              FloatFunc floatFuncPtr,
+                              Uint8Func uint8FuncPtr,
+                              Params&&... params)
+{
+    return IsClBackendSupported(reasonIfUnsupported) &&
+        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
+                                      dataType,
+                                      floatFuncPtr,
+                                      floatFuncPtr,
+                                      uint8FuncPtr,
+                                      std::forward<Params>(params)...);
+}
+
+} // anonymous namespace
+
 bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ActivationDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsActivationSupportedCl(input, output, descriptor, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
 }
 
 bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
@@ -51,7 +133,11 @@ bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsAdditionSupportedCl(input0, input1, output, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
 bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -63,34 +149,44 @@ bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                    const BatchNormalizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsBatchNormalizationSupportedCl(input,
-                                                  output,
-                                                  mean,
-                                                  var,
-                                                  beta,
-                                                  gamma,
-                                                  descriptor,
-                                                  reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   mean,
+                                   var,
+                                   beta,
+                                   gamma,
+                                   descriptor);
 }
 
 bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConstantSupportedCl(output, reasonIfUnsupported);
+    return IsSupportedForDataTypeCl(reasonIfUnsupported,
+                                    output.GetDataType(),
+                                    &TrueFunc<>,
+                                    &FalseFuncU8<>);
 }
 
 bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvertFp16ToFp32SupportedCl(input, output, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output);
 }
 
 bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvertFp32ToFp16SupportedCl(input, output, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output);
 }
 
 bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
@@ -100,12 +196,13 @@ bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                               const Optional<TensorInfo>& biases,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvolution2dSupportedCl(input,
-                                             output,
-                                             descriptor,
-                                             weights,
-                                             biases,
-                                             reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor,
+                                   weights,
+                                   biases);
 }
 
 bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
@@ -115,12 +212,13 @@ bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                      const Optional<TensorInfo>& biases,
                                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsDepthwiseConvolutionSupportedCl(input,
-                                                    output,
-                                                    descriptor,
-                                                    weights,
-                                                    biases,
-                                                    reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor,
+                                   weights,
+                                   biases);
 }
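
The pattern above repeats for every layer: each ClLayerSupport method hands its arguments to FORWARD_WORKLOAD_VALIDATE_FUNC, which (when ARMCOMPUTECL_ENABLED is set) calls the matching workload Validate function and folds the returned arm_compute::Status into a bool plus an optional reason string. The self-contained sketch below mirrors that conversion with stand-in types so it compiles without Arm NN or the Compute Library; Status, FakeSoftmaxValidate and the std::string* out-parameter are illustrative assumptions, not code from the patch.

// Minimal sketch of the status-to-bool forwarding used by IsWorkloadSupported.
#include <iostream>
#include <string>
#include <utility>

struct Status                    // stand-in for arm_compute::Status
{
    bool ok;
    std::string description;
};

// Call a Validate-style function with perfect forwarding; on failure, copy the
// error description into the caller-supplied reason string.
template <typename FuncType, typename... Args>
bool IsWorkloadSupportedSketch(FuncType&& func, std::string* reasonIfUnsupported, Args&&... args)
{
    const Status status = func(std::forward<Args>(args)...);
    if (!status.ok && reasonIfUnsupported != nullptr)
    {
        *reasonIfUnsupported = status.description;
    }
    return status.ok;
}

// Stand-in for a workload validator such as ClSoftmaxWorkloadValidate.
Status FakeSoftmaxValidate(unsigned int numDimensions)
{
    if (numDimensions > 2)
    {
        return { false, "only 1D/2D softmax is handled by this sketch" };
    }
    return { true, "" };
}

int main()
{
    std::string reason;
    const bool supported = IsWorkloadSupportedSketch(FakeSoftmaxValidate, &reason, 4u);
    if (supported)
    {
        std::cout << "supported\n";
    }
    else
    {
        std::cout << "unsupported: " << reason << "\n";
    }
    return 0;
}
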
bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0, @@ -128,21 +226,34 @@ bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0, const TensorInfo& output, Optional reasonIfUnsupported) const { - return armnn::IsDivisionSupportedCl(input0, input1, output, reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate, + reasonIfUnsupported, + input0, + input1, + output); } bool ClLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input, const FakeQuantizationDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsFakeQuantizationSupportedCl(input, descriptor, reasonIfUnsupported); + ignore_unused(input); + ignore_unused(descriptor); + ignore_unused(reasonIfUnsupported); + return false; } bool ClLayerSupport::IsFloorSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const { - return armnn::IsFloorSupportedCl(input, output, reasonIfUnsupported); + ignore_unused(output); + return IsClBackendSupported(reasonIfUnsupported) && + IsSupportedForDataTypeGeneric(reasonIfUnsupported, + input.GetDataType(), + &FalseFuncF16<>, + &TrueFunc<>, + &FalseFuncU8<>); } bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, @@ -152,18 +263,22 @@ bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, const FullyConnectedDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsFullyConnectedSupportedCl(input, - output, - weights, - biases, - descriptor, - reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate, + reasonIfUnsupported, + input, + output, + weights, + biases, + descriptor); } bool ClLayerSupport::IsInputSupported(const TensorInfo& input, Optional reasonIfUnsupported) const { - return armnn::IsInputSupportedCl(input, reasonIfUnsupported); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); } bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input, @@ -171,7 +286,11 @@ bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input, const L2NormalizationDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsL2NormalizationSupportedCl(input, output, descriptor, reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate, + reasonIfUnsupported, + input, + output, + descriptor); } bool ClLayerSupport::IsLstmSupported(const TensorInfo& input, @@ -201,32 +320,33 @@ bool ClLayerSupport::IsLstmSupported(const TensorInfo& input, const TensorInfo* cellToOutputWeights, Optional reasonIfUnsupported) const { - return armnn::IsLstmSupportedCl(input, - outputStateIn, - cellStateIn, - scratchBuffer, - outputStateOut, - cellStateOut, - output, - descriptor, - inputToForgetWeights, - inputToCellWeights, - inputToOutputWeights, - recurrentToForgetWeights, - recurrentToCellWeights, - recurrentToOutputWeights, - forgetGateBias, - cellBias, - outputGateBias, - inputToInputWeights, - recurrentToInputWeights, - cellToInputWeights, - inputGateBias, - projectionWeights, - projectionBias, - cellToForgetWeights, - cellToOutputWeights, - reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate, + reasonIfUnsupported, + input, + outputStateIn, + cellStateIn, + scratchBuffer, + outputStateOut, + cellStateOut, + output, + descriptor, + inputToForgetWeights, + inputToCellWeights, + inputToOutputWeights, + recurrentToForgetWeights, + recurrentToCellWeights, + 
recurrentToOutputWeights, + forgetGateBias, + cellBias, + outputGateBias, + inputToInputWeights, + recurrentToInputWeights, + cellToInputWeights, + inputGateBias, + projectionWeights, + projectionBias, + cellToForgetWeights, + cellToOutputWeights); } bool ClLayerSupport::IsMeanSupported(const TensorInfo& input, @@ -234,14 +354,22 @@ bool ClLayerSupport::IsMeanSupported(const TensorInfo& input, const MeanDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsMeanSupportedCl(input, output, descriptor,reasonIfUnsupported); + ignore_unused(input); + ignore_unused(output); + ignore_unused(descriptor); + ignore_unused(reasonIfUnsupported); + return false; } bool ClLayerSupport::IsMergerSupported(const std::vector inputs, const OriginsDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsMergerSupportedCl(inputs, descriptor, reasonIfUnsupported); + ignore_unused(descriptor); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + inputs[0]->GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); } bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0, @@ -249,7 +377,11 @@ bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0, const TensorInfo& output, Optional reasonIfUnsupported) const { - return armnn::IsMultiplicationSupportedCl(input0, input1, output, reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate, + reasonIfUnsupported, + input0, + input1, + output); } bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input, @@ -257,16 +389,16 @@ bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input, const NormalizationDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsNormalizationSupportedCl(input, - output, - descriptor, - reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor); } bool ClLayerSupport::IsOutputSupported(const TensorInfo& output, Optional reasonIfUnsupported) const { - return armnn::IsOutputSupportedCl(output, reasonIfUnsupported); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + output.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); } bool ClLayerSupport::IsPadSupported(const TensorInfo& input, @@ -274,7 +406,11 @@ bool ClLayerSupport::IsPadSupported(const TensorInfo& input, const PadDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsPadSupportedCl(input, output, descriptor, reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate, + reasonIfUnsupported, + input, + output, + descriptor); } bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input, @@ -282,7 +418,9 @@ bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input, const PermuteDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsPermuteSupportedCl(input, output, descriptor, reasonIfUnsupported); + ignore_unused(input); + ignore_unused(output); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor); } bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input, @@ -290,19 +428,24 @@ bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input, const Pooling2dDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsPooling2dSupportedCl(input, output, descriptor, reasonIfUnsupported); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor); } bool 
ClLayerSupport::IsReshapeSupported(const TensorInfo& input, Optional reasonIfUnsupported) const { - return armnn::IsReshapeSupportedCl(input, reasonIfUnsupported); + ignore_unused(input); + ignore_unused(reasonIfUnsupported); + return true; } bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, Optional reasonIfUnsupported) const { - return armnn::IsResizeBilinearSupportedCl(input, reasonIfUnsupported); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &FalseFuncU8<>); } bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input, @@ -310,14 +453,19 @@ bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input, const SoftmaxDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsSoftmaxSupportedCl(input, output, descriptor, reasonIfUnsupported); + ignore_unused(descriptor); + FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output); } bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input, const ViewsDescriptor& descriptor, Optional reasonIfUnsupported) const { - return armnn::IsSplitterSupportedCl(input, descriptor, reasonIfUnsupported); + ignore_unused(descriptor); + return IsSupportedForDataTypeCl(reasonIfUnsupported, + input.GetDataType(), + &TrueFunc<>, + &TrueFunc<>); } bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0, @@ -325,493 +473,11 @@ bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0, const TensorInfo& output, Optional reasonIfUnsupported) const { - return armnn::IsSubtractionSupportedCl(input0, input1, output, reasonIfUnsupported); -} - -// -// Implementation functions -// -// TODO: Functions kept for backward compatibility. Remove redundant functions -// once transition to plugable backends is complete. - -namespace -{ -template -bool IsMatchingSize2d(const TensorInfo& weightInfo) -{ - // Width & Height must match. - return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize); -} - -template -bool IsMatchingStride(uint32_t actualStride) -{ - return ValidStride == actualStride; + FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate, + reasonIfUnsupported, + input0, + input1, + output); } -template -bool IsMatchingStride(uint32_t actualStride) -{ - return IsMatchingStride(actualStride) || IsMatchingStride(actualStride); -}; - -bool IsClBackendSupported(Optional reasonIfUnsupported) -{ -#if ARMCOMPUTECL_ENABLED - return true; -#else - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "The armnn library has been built without CL support"; - } - return false; -#endif -} - -#if ARMCOMPUTECL_ENABLED -#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr) -#else -#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported) -#endif - -#if ARMCOMPUTECL_ENABLED -template -inline bool IsWorkloadSupported(FuncType&& func, Optional reasonIfUnsupported, Args&&... args) -{ - arm_compute::Status aclStatus = func(std::forward(args)...); - const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK); - if (!supported && reasonIfUnsupported) - { - reasonIfUnsupported.value() = aclStatus.error_description(); - } - return supported; -} - -#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \ - return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__); -#else -#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) 
\ - return IsClBackendSupported(reasonIfUnsupported); -#endif - -} //namespace - -template -bool IsSupportedForDataTypeCl(Optional reasonIfUnsupported, - DataType dataType, - FloatFunc floatFuncPtr, - Uint8Func uint8FuncPtr, - Params&&... params) -{ - return IsClBackendSupported(reasonIfUnsupported) && - IsSupportedForDataTypeGeneric(reasonIfUnsupported, - dataType, - floatFuncPtr, - floatFuncPtr, - uint8FuncPtr, - std::forward(params)...); -} - -bool IsActivationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const ActivationDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate, - reasonIfUnsupported, - input, - output, - descriptor); -} - -bool IsAdditionSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate, - reasonIfUnsupported, - input0, - input1, - output); -} - -bool IsBatchNormalizationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const TensorInfo& mean, - const TensorInfo& var, - const TensorInfo& beta, - const TensorInfo& gamma, - const BatchNormalizationDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate, - reasonIfUnsupported, - input, - output, - mean, - var, - beta, - gamma, - descriptor); -} - -bool IsConstantSupportedCl(const TensorInfo& output, - Optional reasonIfUnsupported) -{ - return IsSupportedForDataTypeCl(reasonIfUnsupported, - output.GetDataType(), - &TrueFunc<>, - &FalseFuncU8<>); -} - -bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc) -{ - bool isSupported = false; - - bool strideXIsOneOrTwo = IsMatchingStride<1, 2>(desc.m_StrideX); - bool strideXIsThree = IsMatchingStride<3>(desc.m_StrideX); - - bool strideYIsOneOrTwo = IsMatchingStride<1, 2>(desc.m_StrideY); - bool strideYIsThree = IsMatchingStride<3>(desc.m_StrideY); - - bool strideIsOneOrTwo = strideXIsOneOrTwo && strideYIsOneOrTwo; - bool strideIsOneOrTwoOrThree = ( strideXIsOneOrTwo || strideXIsThree ) && ( strideYIsOneOrTwo || strideYIsThree ); - - // 1x1 convolution with strides of 1,2,3. - isSupported |= IsMatchingSize2d<1>(weightInfo) && ( strideIsOneOrTwoOrThree ); - - // 3x3 convolution with strides of 1,2. - isSupported |= IsMatchingSize2d<3>(weightInfo) && ( strideIsOneOrTwo ); - - // 5x5 convolution with strides of 1,2 - isSupported |= IsMatchingSize2d<5>(weightInfo) && ( strideIsOneOrTwo ); - - //Fall back to normal convolution for the asymmetric padding case. - if (desc.m_PadLeft != desc.m_PadRight || - desc.m_PadTop != desc.m_PadBottom) - { - //Direct convolution does not support asymmetric padding yet. 
- isSupported = false; - } - - return isSupported; -} - -bool IsDirectConvolution2dParamsSupportedCl(Optional reasonIfUnsupported, - const Convolution2dDescriptor& parameters, - const TensorInfo& weightInfo) -{ - ignore_unused(reasonIfUnsupported); - return IsClDirectConvolution2dSupported(weightInfo, parameters); -} - -bool IsConvolution2dSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const Convolution2dDescriptor& descriptor, - const TensorInfo& weights, - const Optional& biases, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate, - reasonIfUnsupported, - input, - output, - descriptor, - weights, - biases); -} - -bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const DepthwiseConvolution2dDescriptor& descriptor, - const TensorInfo& weights, - const Optional& biases, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate, - reasonIfUnsupported, - input, - output, - descriptor, - weights, - biases); -} - -bool IsDivisionSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate, - reasonIfUnsupported, - input0, - input1, - output); -} - -bool IsSubtractionSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported) -{ - - FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate, - reasonIfUnsupported, - input0, - input1, - output); -} - -bool IsFullyConnectedSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const TensorInfo& weights, - const TensorInfo& biases, - const FullyConnectedDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate, - reasonIfUnsupported, - input, - output, - weights, - biases, - descriptor); -} - -bool IsInputSupportedCl(const TensorInfo& input, - Optional reasonIfUnsupported) -{ - return IsSupportedForDataTypeCl(reasonIfUnsupported, - input.GetDataType(), - &TrueFunc<>, - &TrueFunc<>); -} - -bool IsL2NormalizationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const L2NormalizationDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor); -} - -bool IsMergerSupportedCl(const std::vector inputs, - const OriginsDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - ignore_unused(descriptor); - return IsSupportedForDataTypeCl(reasonIfUnsupported, - inputs[0]->GetDataType(), - &TrueFunc<>, - &FalseFuncU8<>); -} - -bool IsMultiplicationSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate, - reasonIfUnsupported, - input0, - input1, - output); -} - -bool IsNormalizationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const NormalizationDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor); -} - -bool IsOutputSupportedCl(const TensorInfo& output, - Optional reasonIfUnsupported) -{ - return IsSupportedForDataTypeCl(reasonIfUnsupported, - output.GetDataType(), - &TrueFunc<>, - &TrueFunc<>); -} - -bool IsPermuteSupportedCl(const 
TensorInfo& input, - const TensorInfo& output, - const PermuteDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - ignore_unused(input); - ignore_unused(output); - FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor); -} - -bool IsPooling2dSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const Pooling2dDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor); -} - -bool IsResizeBilinearSupportedCl(const TensorInfo& input, - Optional reasonIfUnsupported) -{ - return IsSupportedForDataTypeCl(reasonIfUnsupported, - input.GetDataType(), - &TrueFunc<>, - &FalseFuncU8<>); -} - -bool IsSoftmaxSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const SoftmaxDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - ignore_unused(descriptor); - FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output); -} - -bool IsSplitterSupportedCl(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - ignore_unused(descriptor); - return IsSupportedForDataTypeCl(reasonIfUnsupported, - input.GetDataType(), - &TrueFunc<>, - &TrueFunc<>); -} - -bool IsFakeQuantizationSupportedCl(const TensorInfo& input, - const FakeQuantizationDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - ignore_unused(input); - ignore_unused(descriptor); - ignore_unused(reasonIfUnsupported); - return false; -} - -bool IsReshapeSupportedCl(const TensorInfo& input, - Optional reasonIfUnsupported) -{ - ignore_unused(input); - ignore_unused(reasonIfUnsupported); - return true; -} - -bool IsFloorSupportedCl(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported) -{ - ignore_unused(output); - return IsClBackendSupported(reasonIfUnsupported) && - IsSupportedForDataTypeGeneric(reasonIfUnsupported, - input.GetDataType(), - &FalseFuncF16<>, - &TrueFunc<>, - &FalseFuncU8<>); -} - -bool IsLstmSupportedCl(const TensorInfo& input, - const TensorInfo& outputStateIn, - const TensorInfo& cellStateIn, - const TensorInfo& scratchBuffer, - const TensorInfo& outputStateOut, - const TensorInfo& cellStateOut, - const TensorInfo& output, - const LstmDescriptor& descriptor, - const TensorInfo& inputToForgetWeights, - const TensorInfo& inputToCellWeights, - const TensorInfo& inputToOutputWeights, - const TensorInfo& recurrentToForgetWeights, - const TensorInfo& recurrentToCellWeights, - const TensorInfo& recurrentToOutputWeights, - const TensorInfo& forgetGateBias, - const TensorInfo& cellBias, - const TensorInfo& outputGateBias, - const TensorInfo* inputToInputWeights, - const TensorInfo* recurrentToInputWeights, - const TensorInfo* cellToInputWeights, - const TensorInfo* inputGateBias, - const TensorInfo* projectionWeights, - const TensorInfo* projectionBias, - const TensorInfo* cellToForgetWeights, - const TensorInfo* cellToOutputWeights, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate, - reasonIfUnsupported, - input, - outputStateIn, - cellStateIn, - scratchBuffer, - outputStateOut, - cellStateOut, - output, - descriptor, - inputToForgetWeights, - inputToCellWeights, - inputToOutputWeights, - recurrentToForgetWeights, - recurrentToCellWeights, - recurrentToOutputWeights, - forgetGateBias, - cellBias, - outputGateBias, - inputToInputWeights, - recurrentToInputWeights, - cellToInputWeights, - inputGateBias, - 
projectionWeights, - projectionBias, - cellToForgetWeights, - cellToOutputWeights); -} - -bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate, - reasonIfUnsupported, - input, - output); -} - -bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate, - reasonIfUnsupported, - input, - output); -} - -bool IsMeanSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const MeanDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - ignore_unused(input); - ignore_unused(output); - ignore_unused(descriptor); - ignore_unused(reasonIfUnsupported); - return false; -} - -bool IsPadSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const PadDescriptor& descriptor, - Optional reasonIfUnsupported) -{ - FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate, - reasonIfUnsupported, - input, - output, - descriptor); -} - -} +} // namespace armnn diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp index 2d57d10040..6bdeb5a6f6 100644 --- a/src/backends/cl/ClLayerSupport.hpp +++ b/src/backends/cl/ClLayerSupport.hpp @@ -169,165 +169,4 @@ public: Optional reasonIfUnsupported = EmptyOptional()) const override; }; -bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc); -bool IsClDepthwiseConvolution2dDescParamsSupported(Optional reasonIfUnsupported, - const DepthwiseConvolution2dDescriptor& parameters, - const TensorInfo& weights); - -bool IsActivationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const ActivationDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsAdditionSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsBatchNormalizationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const TensorInfo& mean, - const TensorInfo& var, - const TensorInfo& beta, - const TensorInfo& gamma, - const BatchNormalizationDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsConstantSupportedCl(const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsConvolution2dSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const Convolution2dDescriptor& descriptor, - const TensorInfo& weights, - const Optional& biases, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const DepthwiseConvolution2dDescriptor& descriptor, - const TensorInfo& weights, - const Optional& biases, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsDivisionSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsSubtractionSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsFullyConnectedSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const TensorInfo& weights, - const TensorInfo& biases, - const FullyConnectedDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool 
IsInputSupportedCl(const TensorInfo& input, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsL2NormalizationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const L2NormalizationDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsLstmSupportedCl(const TensorInfo& input, - const TensorInfo& outputStateIn, - const TensorInfo& cellStateIn, - const TensorInfo& scratchBuffer, - const TensorInfo& outputStateOut, - const TensorInfo& cellStateOut, - const TensorInfo& output, - const LstmDescriptor& descriptor, - const TensorInfo& inputToForgetWeights, - const TensorInfo& inputToCellWeights, - const TensorInfo& inputToOutputWeights, - const TensorInfo& recurrentToForgetWeights, - const TensorInfo& recurrentToCellWeights, - const TensorInfo& recurrentToOutputWeights, - const TensorInfo& forgetGateBias, - const TensorInfo& cellBias, - const TensorInfo& outputGateBias, - const TensorInfo* inputToInputWeights, - const TensorInfo* recurrentToInputWeights, - const TensorInfo* cellToInputWeights, - const TensorInfo* inputGateBias, - const TensorInfo* projectionWeights, - const TensorInfo* projectionBias, - const TensorInfo* cellToForgetWeights, - const TensorInfo* cellToOutputWeights, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsMergerSupportedCl(const std::vector inputs, - const OriginsDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsMultiplicationSupportedCl(const TensorInfo& input0, - const TensorInfo& input1, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsNormalizationSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const NormalizationDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsOutputSupportedCl(const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsPermuteSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const PermuteDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsPooling2dSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const Pooling2dDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsResizeBilinearSupportedCl(const TensorInfo& input, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsSoftmaxSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const SoftmaxDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsSplitterSupportedCl(const TensorInfo& input, - const ViewsDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsFakeQuantizationSupportedCl(const TensorInfo& input, - const FakeQuantizationDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsReshapeSupportedCl(const TensorInfo& input, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsFloorSupportedCl(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()); - -bool IsMeanSupportedCl(const TensorInfo& input, - const TensorInfo& output, - const MeanDescriptor& descriptor, - Optional reasonIfUnsupported = EmptyOptional()); - -bool 
IsPadSupportedCl(const TensorInfo& input,
-                      const TensorInfo& output,
-                      const PadDescriptor& descriptor,
-                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-}
+} // namespace armnn
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 0f8b75f50e..62ce2cb18f 100755
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -84,7 +84,8 @@ BOOST_AUTO_TEST_CASE(Softmax4dSupport)
     const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32);
 
     // 4D Softmax should be reported as unsupported on the CL backend
-    BOOST_TEST(!armnn::IsSoftmaxSupportedCl(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
+    armnn::ClLayerSupport layerSupport;
+    BOOST_TEST(!layerSupport.IsSoftmaxSupported(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
 }
 
 // Splitter
-- 
cgit v1.2.1
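
With the free functions removed, callers query support through the ILayerSupport interface, as the updated Softmax4dSupport test above does. A rough usage sketch, extended to also retrieve the failure reason, is given below; the include paths and the Optional<std::string&> construction are assumptions about the Arm NN API of this period rather than content of the patch.

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <cl/ClLayerSupport.hpp>   // backend-internal header, as used by the test above

#include <iostream>
#include <string>

int main()
{
    const unsigned int dimensionSizes[] = { 2, 2, 2, 2 };
    const armnn::TensorInfo inputInfo(4, dimensionSizes, armnn::DataType::Float32);
    const armnn::TensorInfo outputInfo(4, dimensionSizes, armnn::DataType::Float32);

    // Query the CL backend through the ILayerSupport-style interface instead of
    // the removed IsSoftmaxSupportedCl free function.
    armnn::ClLayerSupport layerSupport;
    std::string reason;
    const bool supported = layerSupport.IsSoftmaxSupported(inputInfo,
                                                           outputInfo,
                                                           armnn::SoftmaxDescriptor(),
                                                           armnn::Optional<std::string&>(reason));

    // 4D softmax is expected to be reported as unsupported on the CL backend.
    if (!supported)
    {
        std::cout << "unsupported: " << reason << std::endl;
    }
    return 0;
}
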