From fc82431755edb950b46aaeda5725635c1fe7d02d Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Mon, 15 Oct 2018 15:00:13 +0100
Subject: IVGCVSW-2003: Get rid of IsLayerSupportedNeon functions in favor of
 ILayerSupport interface

Change-Id: I03985ff678acf9393680340638a2e1f425b9966f
---
 src/backends/neon/NeonLayerSupport.cpp             | 737 ++++++---------
 src/backends/neon/NeonLayerSupport.hpp             | 166 +----
 src/backends/neon/test/NeonLayerTests.cpp          |  56 +-
 .../workloads/NeonNormalizationFloatWorkload.cpp   |  30 +-
 4 files changed, 272 insertions(+), 717 deletions(-)

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index a044e04f42..99e227897f 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -5,12 +5,11 @@
 #include "NeonLayerSupport.hpp"
 
-#include
-#include
-
 #include
-#include
+#include
+#include
 #include
+#include
 
 #include
 
@@ -35,12 +34,71 @@ using namespace boost;
 namespace armnn
 {
 
+namespace
+{
+
+bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
+{
+#if ARMCOMPUTENEON_ENABLED
+    return true;
+#else
+    if (reasonIfUnsupported)
+    {
+        reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
+    }
+    return false;
+#endif
+}
+
+template<typename FloatFunc, typename Uint8Func, typename ... Params>
+bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
+                                DataType dataType,
+                                FloatFunc floatFuncPtr,
+                                Uint8Func uint8FuncPtr,
+                                Params&&... params)
+{
+    return IsNeonBackendSupported(reasonIfUnsupported) &&
+           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
+                                         dataType,
+                                         floatFuncPtr,
+                                         floatFuncPtr,
+                                         uint8FuncPtr,
+                                         std::forward<Params>(params)...);
+}
+
+#if ARMCOMPUTENEON_ENABLED
+template<typename FuncType, typename ... Args>
+inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
+{
+    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
+    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
+    if (!supported && reasonIfUnsupported)
+    {
+        reasonIfUnsupported.value() = aclStatus.error_description();
+    }
+    return supported;
+}
+
+#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
+    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
+#else
+#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
+    return IsNeonBackendSupported(reasonIfUnsupported);
+#endif
+
+} // anonymous namespace
+
 bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ActivationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsActivationSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+    ignore_unused(descriptor);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
 }
 
 bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
@@ -48,7 +106,11 @@ bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsAdditionSupportedNeon(input0, input1, output, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
 bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
@@ -60,34 +122,44 @@ bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                      const BatchNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsBatchNormalizationSupportedNeon(input,
-                                                    output,
-                                                    mean,
-                                                    var,
-                                                    beta,
-                                                    gamma,
-                                                    descriptor,
-                                                    reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   mean,
+                                   var,
+                                   beta,
+                                   gamma,
+                                   descriptor);
 }
 
 bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConstantSupportedNeon(output, reasonIfUnsupported);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      output.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
 }
 
 bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvertFp16ToFp32SupportedNeon(input, output, reasonIfUnsupported);
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return true;
 }
 
 bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvertFp32ToFp16SupportedNeon(input, output, reasonIfUnsupported);
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return true;
 }
 
 bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
@@ -97,12 +169,13 @@ bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                 const Optional<TensorInfo>& biases,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsConvolution2dSupportedNeon(input,
-                                               output,
-                                               descriptor,
-                                               weights,
-                                               biases,
-                                               reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor,
+                                   weights,
+                                   biases);
 }
 
 bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
@@ -112,12 +185,13 @@ bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                        const Optional<TensorInfo>& biases,
                                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsDepthwiseConvolutionSupportedNeon(input,
-                                                      output,
-                                                      descriptor,
-                                                      weights,
-                                                      biases,
-                                                      reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor,
+                                   weights,
+                                   biases);
 }
 
 bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
@@ -125,21 +199,34 @@ bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsDivisionSupportedNeon(input0, input1, output, reasonIfUnsupported);
+    ignore_unused(input0);
+    ignore_unused(input1);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return false;
 }
 
 bool NeonLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                    const FakeQuantizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsFakeQuantizationSupportedNeon(input, descriptor, reasonIfUnsupported);
+    ignore_unused(input);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
+    return false;
 }
 
 bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsFloorSupportedNeon(input, output, reasonIfUnsupported);
+    ignore_unused(output);
+    return IsNeonBackendSupported(reasonIfUnsupported) &&
+           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
+                                         input.GetDataType(),
+                                         &FalseFuncF16<>,
+                                         &TrueFunc<>,
+                                         &FalseFuncU8<>);
 }
 
 bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
@@ -149,18 +236,27 @@ bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                  const FullyConnectedDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsFullyConnectedSupportedNeon(input,
-                                                output,
-                                                weights,
-                                                biases,
-                                                descriptor,
-                                                reasonIfUnsupported);
+    // At the moment U8 is unsupported
+    if (input.GetDataType() == DataType::QuantisedAsymm8)
+    {
+        return false;
+    }
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   weights,
+                                   biases,
+                                   descriptor);
 }
 
 bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsInputSupportedNeon(input, reasonIfUnsupported);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      input.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
 }
 
 bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
@@ -168,7 +264,7 @@ bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                   const L2NormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsL2NormalizationSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
@@ -198,32 +294,33 @@ bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
                                        const TensorInfo* cellToOutputWeights,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsLstmSupportedNeon(input,
-                                      outputStateIn,
-                                      cellStateIn,
-                                      scratchBuffer,
-                                      outputStateOut,
-                                      cellStateOut,
-                                      output,
-                                      descriptor,
-                                      inputToForgetWeights,
-                                      inputToCellWeights,
-                                      inputToOutputWeights,
-                                      recurrentToForgetWeights,
-                                      recurrentToCellWeights,
-                                      recurrentToOutputWeights,
-                                      forgetGateBias,
-                                      cellBias,
-                                      outputGateBias,
-                                      inputToInputWeights,
-                                      recurrentToInputWeights,
-                                      cellToInputWeights,
-                                      inputGateBias,
-                                      projectionWeights,
-                                      projectionBias,
-                                      cellToForgetWeights,
-                                      cellToOutputWeights,
-                                      reasonIfUnsupported);
+    ignore_unused(input);
+    ignore_unused(outputStateIn);
+    ignore_unused(cellStateIn);
+    ignore_unused(scratchBuffer);
+    ignore_unused(outputStateOut);
+    ignore_unused(cellStateOut);
+    ignore_unused(output);
+    ignore_unused(descriptor);
+    ignore_unused(inputToForgetWeights);
+    ignore_unused(inputToCellWeights);
+    ignore_unused(inputToOutputWeights);
+    ignore_unused(recurrentToForgetWeights);
+    ignore_unused(recurrentToCellWeights);
+    ignore_unused(recurrentToOutputWeights);
+    ignore_unused(forgetGateBias);
+    ignore_unused(cellBias);
+    ignore_unused(outputGateBias);
+    ignore_unused(inputToInputWeights);
+    ignore_unused(recurrentToInputWeights);
+    ignore_unused(cellToInputWeights);
+    ignore_unused(inputGateBias);
+    ignore_unused(projectionWeights);
+    ignore_unused(projectionBias);
+    ignore_unused(cellToForgetWeights);
+    ignore_unused(cellToOutputWeights);
+    ignore_unused(reasonIfUnsupported);
+    return false;
 }
 
 bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
@@ -231,14 +328,22 @@ bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                        const MeanDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsMeanSupportedNeon(input, output, descriptor,reasonIfUnsupported);
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
+    return false;
 }
 
 bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                          const OriginsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsMergerSupportedNeon(inputs, descriptor, reasonIfUnsupported);
+    ignore_unused(descriptor);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      inputs[0]->GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
 }
 
 bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
@@ -246,7 +351,11 @@ bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsMultiplicationSupportedNeon(input0, input1, output, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
 bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
@@ -254,16 +363,20 @@ bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                 const NormalizationDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsNormalizationSupportedNeon(input,
-                                               output,
-                                               descriptor,
-                                               reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
 }
 
 bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsOutputSupportedNeon(output, reasonIfUnsupported);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      output.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
 }
 
 bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
@@ -271,7 +384,11 @@ bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                       const PadDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsPadSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
+    return false;
 }
 
 bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
@@ -279,7 +396,7 @@ bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                           const PermuteDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsPermuteSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
@@ -287,19 +404,24 @@ bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                             const Pooling2dDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsPooling2dSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsReshapeSupportedNeon(input, reasonIfUnsupported);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      input.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
 }
 
 bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsResizeBilinearSupportedNeon(input, reasonIfUnsupported);
+    ignore_unused(input);
+    ignore_unused(reasonIfUnsupported);
+    return false;
 }
 
 bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
@@ -307,14 +429,18 @@ bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                           const SoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsSoftmaxSupportedNeon(input, output, descriptor, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                            const ViewsDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsSplitterSupportedNeon(input, descriptor, reasonIfUnsupported);
+    ignore_unused(descriptor);
+    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
+                                      input.GetDataType(),
+                                      &TrueFunc<>,
+                                      &TrueFunc<>);
 }
 
 bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
@@ -322,14 +448,13 @@ bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
-    return armnn::IsSubtractionSupportedNeon(input0, input1, output, reasonIfUnsupported);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
-//
-// Implementation functions
-//
-// TODO: Functions kept for backward compatibility. Remove once transition to plugable backends is complete!
-
 bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
 {
     // See arm_compute::NEDirectConvolutionLayer documentation for the supported cases,
@@ -364,440 +489,4 @@ bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convol
     return preferDirectConvolution;
 }
 
-bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
-                                            const NormalizationDescriptor& parameters)
-{
-    if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
-    {
-        if (reasonIfUnsupported)
-        {
-            reasonIfUnsupported.value() = "Unsupported normalisation method type, only LocalBrightness is supported";
-        }
-        return false;
-    }
-    if (parameters.m_NormSize % 2 == 0)
-    {
-        if (reasonIfUnsupported)
-        {
-            reasonIfUnsupported.value() = "Normalization size must be an odd number.";
-        }
-        return false;
-    }
-
-    return true;
-}
-
-bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
-{
-#if ARMCOMPUTENEON_ENABLED
-    return true;
-#else
-    if (reasonIfUnsupported)
-    {
-        reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
-    }
-    return false;
-#endif
-}
-
-template<typename FloatFunc, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
-                                DataType dataType,
-                                FloatFunc floatFuncPtr,
-                                Uint8Func uint8FuncPtr,
-                                Params&&... params)
-{
-    return IsNeonBackendSupported(reasonIfUnsupported) &&
-           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
-                                         dataType,
-                                         floatFuncPtr,
-                                         floatFuncPtr,
-                                         uint8FuncPtr,
-                                         std::forward<Params>(params)...);
-}
-
-#if ARMCOMPUTENEON_ENABLED
-template<typename FuncType, typename ... Args>
-inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
-{
-    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        reasonIfUnsupported.value() = aclStatus.error_description();
-    }
-    return supported;
-}
-
-#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
-    return IsNeonBackendSupported(reasonIfUnsupported);
-#endif
-
-bool IsActivationSupportedNeon(const TensorInfo& input,
-                               const TensorInfo& output,
-                               const ActivationDescriptor& descriptor,
-                               Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(descriptor);
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output,
-                                   descriptor);
-}
-
-bool IsAdditionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input0,
-                                   input1,
-                                   output);
-}
-
-bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
-                                       const TensorInfo& output,
-                                       const TensorInfo& mean,
-                                       const TensorInfo& var,
-                                       const TensorInfo& beta,
-                                       const TensorInfo& gamma,
-                                       const BatchNormalizationDescriptor& descriptor,
-                                       Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output,
-                                   mean,
-                                   var,
-                                   beta,
-                                   gamma,
-                                   descriptor);
-}
-
-bool IsConstantSupportedNeon(const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported)
-{
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      output.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsConvolution2dSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const Convolution2dDescriptor& descriptor,
-                                  const TensorInfo& weights,
-                                  const Optional<TensorInfo>& biases,
-                                  Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output,
-                                   descriptor,
-                                   weights,
-                                   biases);
-}
-
-bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
-                                         const TensorInfo& output,
-                                         const DepthwiseConvolution2dDescriptor& descriptor,
-                                         const TensorInfo& weights,
-                                         const Optional<TensorInfo>& biases,
-                                         Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output,
-                                   descriptor,
-                                   weights,
-                                   biases);
-}
-
-bool IsDivisionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported)
-{
-    // At the moment division is not supported
-    ignore_unused(input0);
-    ignore_unused(input1);
-    ignore_unused(output);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-bool IsSubtractionSupportedNeon(const TensorInfo& input0,
-                                const TensorInfo& input1,
-                                const TensorInfo& output,
-                                Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input0,
-                                   input1,
-                                   output);
-}
-
-bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   const TensorInfo& weights,
-                                   const TensorInfo& biases,
-                                   const FullyConnectedDescriptor& descriptor,
-                                   Optional<std::string&> reasonIfUnsupported)
-{
-    // At the moment U8 is unsupported
-    if (input.GetDataType() == DataType::QuantisedAsymm8)
-    {
-        return false;
-    }
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input,
-                                   output,
-                                   weights,
-                                   biases,
-                                   descriptor);
-}
-
-bool IsInputSupportedNeon(const TensorInfo& input,
-                          Optional<std::string&> reasonIfUnsupported)
-{
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
-                                    const TensorInfo& output,
-                                    const L2NormalizationDescriptor& descriptor,
-                                    Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
-                           const OriginsDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(descriptor);
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      inputs[0]->GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
-                                   const TensorInfo& input1,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
-                                   reasonIfUnsupported,
-                                   input0,
-                                   input1,
-                                   output);
-}
-
-bool IsNormalizationSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const NormalizationDescriptor& descriptor,
-                                  Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsOutputSupportedNeon(const TensorInfo& output,
-                           Optional<std::string&> reasonIfUnsupported)
-{
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      output.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsPermuteSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const PermuteDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsPooling2dSupportedNeon(const TensorInfo& input,
-                              const TensorInfo& output,
-                              const Pooling2dDescriptor& descriptor,
-                              Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
-                                   Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-bool IsSoftmaxSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const SoftmaxDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
-}
-
-bool IsSplitterSupportedNeon(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(descriptor);
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
-                                     const FakeQuantizationDescriptor& descriptor,
-                                     Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(descriptor);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-bool IsReshapeSupportedNeon(const TensorInfo& input,
-                            Optional<std::string&> reasonIfUnsupported)
-{
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
-}
-
-bool IsFloorSupportedNeon(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(output);
-    return IsNeonBackendSupported(reasonIfUnsupported) &&
-           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
-                                         input.GetDataType(),
-                                         &FalseFuncF16<>,
-                                         &TrueFunc<>,
-                                         &FalseFuncU8<>);
-}
-
-bool IsLstmSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& outputStateIn,
-                         const TensorInfo& cellStateIn,
-                         const TensorInfo& scratchBuffer,
-                         const TensorInfo& outputStateOut,
-                         const TensorInfo& cellStateOut,
-                         const TensorInfo& output,
-                         const LstmDescriptor& descriptor,
-                         const TensorInfo& inputToForgetWeights,
-                         const TensorInfo& inputToCellWeights,
-                         const TensorInfo& inputToOutputWeights,
-                         const TensorInfo& recurrentToForgetWeights,
-                         const TensorInfo& recurrentToCellWeights,
-                         const TensorInfo& recurrentToOutputWeights,
-                         const TensorInfo& forgetGateBias,
-                         const TensorInfo& cellBias,
-                         const TensorInfo& outputGateBias,
-                         const TensorInfo* inputToInputWeights,
-                         const TensorInfo* recurrentToInputWeights,
-                         const TensorInfo* cellToInputWeights,
-                         const TensorInfo* inputGateBias,
-                         const TensorInfo* projectionWeights,
-                         const TensorInfo* projectionBias,
-                         const TensorInfo* cellToForgetWeights,
-                         const TensorInfo* cellToOutputWeights,
-                         Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(outputStateIn);
-    ignore_unused(cellStateIn);
-    ignore_unused(scratchBuffer);
-    ignore_unused(outputStateOut);
-    ignore_unused(cellStateOut);
-    ignore_unused(output);
-    ignore_unused(descriptor);
-    ignore_unused(inputToForgetWeights);
-    ignore_unused(inputToCellWeights);
-    ignore_unused(inputToOutputWeights);
-    ignore_unused(recurrentToForgetWeights);
-    ignore_unused(recurrentToCellWeights);
-    ignore_unused(recurrentToOutputWeights);
-    ignore_unused(forgetGateBias);
-    ignore_unused(cellBias);
-    ignore_unused(outputGateBias);
-    ignore_unused(inputToInputWeights);
-    ignore_unused(recurrentToInputWeights);
-    ignore_unused(cellToInputWeights);
-    ignore_unused(inputGateBias);
-    ignore_unused(projectionWeights);
-    ignore_unused(projectionBias);
-    ignore_unused(cellToForgetWeights);
-    ignore_unused(cellToOutputWeights);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(reasonIfUnsupported);
-    return true;
-}
-
-bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(reasonIfUnsupported);
-    return true;
-}
-
-bool IsMeanSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& output,
-                         const MeanDescriptor& descriptor,
-                         Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(descriptor);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-bool IsPadSupportedNeon(const TensorInfo& input,
-                        const TensorInfo& output,
-                        const PadDescriptor& descriptor,
-                        Optional<std::string&> reasonIfUnsupported)
-{
-    ignore_unused(input);
-    ignore_unused(output);
-    ignore_unused(descriptor);
-    ignore_unused(reasonIfUnsupported);
-    return false;
-}
-
-}
+} // namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 1223ba893a..5e80ab8106 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -167,172 +167,8 @@ public:
                                  const TensorInfo& input1,
                                  const TensorInfo& output,
                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-};
+}; // class NeonLayerSupport
 
 bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
 
-bool IsNeonNormalizationDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
-                                            const NormalizationDescriptor& parameters);
-
-bool IsActivationSupportedNeon(const TensorInfo& input,
-                               const TensorInfo& output,
-                               const ActivationDescriptor& descriptor,
-                               Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsNeonDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
-                                                     const DepthwiseConvolution2dDescriptor& parameters,
-                                                     const TensorInfo& weights);
-
-bool IsAdditionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsBatchNormalizationSupportedNeon(const TensorInfo& input,
-                                       const TensorInfo& output,
-                                       const TensorInfo& mean,
-                                       const TensorInfo& var,
-                                       const TensorInfo& beta,
-                                       const TensorInfo& gamma,
-                                       const BatchNormalizationDescriptor& descriptor,
-                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConstantSupportedNeon(const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvolution2dSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const Convolution2dDescriptor& descriptor,
-                                  const TensorInfo& weights,
-                                  const Optional<TensorInfo>& biases,
-                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-
-bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
-                                         const TensorInfo& output,
-                                         const DepthwiseConvolution2dDescriptor& descriptor,
-                                         const TensorInfo& weights,
-                                         const Optional<TensorInfo>& biases,
-                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsDivisionSupportedNeon(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSubtractionSupportedNeon(const TensorInfo& input0,
-                                const TensorInfo& input1,
-                                const TensorInfo& output,
-                                Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
-                                   const TensorInfo& output,
-                                   const TensorInfo& weights,
-                                   const TensorInfo& biases,
-                                   const FullyConnectedDescriptor& descriptor,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsInputSupportedNeon(const TensorInfo& input,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsL2NormalizationSupportedNeon(const TensorInfo& input,
-                                    const TensorInfo& output,
-                                    const L2NormalizationDescriptor& descriptor,
-                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMergerSupportedNeon(const std::vector<const TensorInfo*> inputs,
-                           const OriginsDescriptor& descriptor,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMultiplicationSupportedNeon(const TensorInfo& input0,
-                                   const TensorInfo& input1,
-                                   const TensorInfo& output,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsNormalizationSupportedNeon(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const NormalizationDescriptor& descriptor,
-                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsOutputSupportedNeon(const TensorInfo& output,
-                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPermuteSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const PermuteDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPooling2dSupportedNeon(const TensorInfo& input,
-                              const TensorInfo& output,
-                              const Pooling2dDescriptor& descriptor,
-                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsResizeBilinearSupportedNeon(const TensorInfo& input,
-                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSoftmaxSupportedNeon(const TensorInfo& input,
-                            const TensorInfo& output,
-                            const SoftmaxDescriptor& descriptor,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsSplitterSupportedNeon(const TensorInfo& input,
-                             const ViewsDescriptor& descriptor,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFakeQuantizationSupportedNeon(const TensorInfo& input,
-                                     const FakeQuantizationDescriptor& descriptor,
-                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsReshapeSupportedNeon(const TensorInfo& input,
-                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsFloorSupportedNeon(const TensorInfo& input,
-                          const TensorInfo& output,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsLstmSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& outputStateIn,
-                         const TensorInfo& cellStateIn,
-                         const TensorInfo& scratchBuffer,
-                         const TensorInfo& outputStateOut,
-                         const TensorInfo& cellStateOut,
-                         const TensorInfo& output,
-                         const LstmDescriptor& descriptor,
-                         const TensorInfo& inputToForgetWeights,
-                         const TensorInfo& inputToCellWeights,
-                         const TensorInfo& inputToOutputWeights,
-                         const TensorInfo& recurrentToForgetWeights,
-                         const TensorInfo& recurrentToCellWeights,
-                         const TensorInfo& recurrentToOutputWeights,
-                         const TensorInfo& forgetGateBias,
-                         const TensorInfo& cellBias,
-                         const TensorInfo& outputGateBias,
-                         const TensorInfo* inputToInputWeights,
-                         const TensorInfo* recurrentToInputWeights,
-                         const TensorInfo* cellToInputWeights,
-                         const TensorInfo* inputGateBias,
-                         const TensorInfo* projectionWeights,
-                         const TensorInfo* projectionBias,
-                         const TensorInfo* cellToForgetWeights,
-                         const TensorInfo* cellToOutputWeights,
-                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvertFp16ToFp32SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsMeanSupportedNeon(const TensorInfo& input,
-                         const TensorInfo& output,
-                         const MeanDescriptor& descriptor,
-                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
-bool IsPadSupportedNeon(const TensorInfo& input,
-                        const TensorInfo& output,
-                        const PadDescriptor& descriptor,
-                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
-
 } // namespace armnn
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 36138b3c3f..31ee7d87c1 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -154,78 +154,79 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
     armnn::TensorInfo biasesInfo;
 
     armnn::DepthwiseConvolution2dDescriptor descriptor;
+    armnn::NeonLayerSupport layerSupport;
 
     // Strides supported: 1,2,3
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 3);
    outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     // Supported stride 4
     descriptor = MakeDepthwiseConv2dDesc(4, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 
     // Supported weights shape 1x1
     armnn::TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, armnn::DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo1x1, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo1x1, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo1x1, biasesInfo));
 
     // Supported shape 2x2
     armnn::TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, armnn::DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo2x2, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo2x2, biasesInfo));
 
     // Asymmetric padding
     descriptor = MakeDepthwiseConv2dDesc(1, 1, 1, 1, 2, 1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(armnn::IsDepthwiseConvolutionSupportedNeon(inputInfo, outputInfo, descriptor,
-                                                          weightsInfo3x3, biasesInfo));
+    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+                                                            weightsInfo3x3, biasesInfo));
 }
 
 // Pooling
@@ -298,7 +299,8 @@ BOOST_AUTO_TEST_CASE(Softmax4dSupport)
     const armnn::TensorInfo outputInfo(numDimensions, &dimensionSizes.front(), armnn::DataType::Float32);
 
     // 4D Softmax should be reported as unsupported on the NEON backend
-    BOOST_TEST(!armnn::IsSoftmaxSupportedNeon(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
+    armnn::NeonLayerSupport layerSupport;
+    BOOST_TEST(!layerSupport.IsSoftmaxSupported(inputInfo, outputInfo, armnn::SoftmaxDescriptor()));
 }
 
 // Splitter
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 0deff79dac..1894048788 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -13,6 +13,34 @@ using namespace armnn::armcomputetensorutils;
 namespace armnn
 {
 
+namespace
+{
+
+bool IsNeonNormalizationDescriptorSupported(const NormalizationDescriptor& parameters,
+                                            Optional<std::string&> reasonIfUnsupported)
+{
+    if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
+    {
+        if (reasonIfUnsupported)
+        {
+            reasonIfUnsupported.value() = "Unsupported normalisation method type, only LocalBrightness is supported";
+        }
+        return false;
+    }
+    if (parameters.m_NormSize % 2 == 0)
+    {
+        if (reasonIfUnsupported)
+        {
+            reasonIfUnsupported.value() = "Normalization size must be an odd number.";
+        }
+        return false;
+    }
+
+    return true;
+}
+
+} // anonymous namespace
+
 arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const NormalizationDescriptor& descriptor)
@@ -33,7 +61,7 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati
 {
     m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
     std::string reasonIfUnsupported;
-    if (!IsNeonNormalizationDescParamsSupported(Optional<std::string&>(reasonIfUnsupported), m_Data.m_Parameters))
+    if (!IsNeonNormalizationDescriptorSupported(m_Data.m_Parameters, Optional<std::string&>(reasonIfUnsupported)))
    {
         throw UnimplementedException(reasonIfUnsupported);
     }
-- 
cgit v1.2.1
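
For context, this is how the refactored interface is consumed after the patch: callers construct a NeonLayerSupport object and invoke the ILayerSupport member functions directly, instead of the old free IsXxxSupportedNeon functions. Below is a minimal sketch mirroring the updated NeonLayerTests.cpp; the include path, tensor shapes, and main() harness are illustrative assumptions, not part of the patch.

    // Hypothetical usage sketch (not part of the commit): query NEON layer
    // support through the member functions introduced by this patch.
    #include "NeonLayerSupport.hpp"

    #include <armnn/Descriptors.hpp>
    #include <armnn/Tensor.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        // Illustrative NHWC-style shapes; any valid TensorInfo works here.
        const armnn::TensorInfo input({ 1, 3, 16, 16 }, armnn::DataType::Float32);
        const armnn::TensorInfo output({ 1, 3, 16, 16 }, armnn::DataType::Float32);
        armnn::ActivationDescriptor descriptor;

        armnn::NeonLayerSupport layerSupport;
        std::string reason;

        // On failure the backend writes a human-readable explanation into
        // 'reason' through the Optional<std::string&> out-parameter.
        const bool supported = layerSupport.IsActivationSupported(
            input, output, descriptor, armnn::Optional<std::string&>(reason));

        if (!supported)
        {
            std::cout << "Activation not supported: " << reason << std::endl;
        }
        return supported ? 0 : 1;
    }

Because every query funnels through FORWARD_WORKLOAD_VALIDATE_FUNC, the same call compiles on builds without NEON: the macro's #else branch falls back to IsNeonBackendSupported(), which returns false and fills the reason string with "The armnn library has been built without NEON support".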