From 085f0a42623401f78bd1df34cdcebe8555809410 Mon Sep 17 00:00:00 2001
From: arovir01
Date: Mon, 8 Oct 2018 14:48:19 +0100
Subject: IVGCVSW-1985: Replace std::string* reasonIfNotSupported with
 Optional<std::string&> in IsLayerSupported implementations

Change-Id: I2f054f0dcff9bdc86ee90c55b3e94c6b4ae25085
---
 src/backends/cl/ClLayerSupport.cpp               | 203 +++++++++++++--------
 src/backends/cl/ClLayerSupport.hpp               | 114 +++++++-----
 src/backends/cl/workloads/ClAdditionWorkload.cpp |  15 +-
 src/backends/cl/workloads/ClAdditionWorkload.hpp |   7 +-
 .../cl/workloads/ClConvertFp16ToFp32Workload.cpp |  16 +-
 .../cl/workloads/ClConvertFp16ToFp32Workload.hpp |   4 +-
 .../cl/workloads/ClConvertFp32ToFp16Workload.cpp |  16 +-
 .../cl/workloads/ClConvertFp32ToFp16Workload.hpp |   4 +-
 src/backends/cl/workloads/ClPadWorkload.cpp      |  15 +-
 src/backends/cl/workloads/ClPadWorkload.hpp      |   7 +-
 .../cl/workloads/ClSubtractionWorkload.cpp       |  15 +-
 .../cl/workloads/ClSubtractionWorkload.hpp       |   7 +-
 12 files changed, 216 insertions(+), 207 deletions(-)

diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 434b069092..494b339952 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -22,16 +22,16 @@
 #include "workloads/ClConvolution2dWorkload.hpp"
 #include "workloads/ClDepthwiseConvolutionWorkload.hpp"
 #include "workloads/ClDivisionFloatWorkload.hpp"
+#include "workloads/ClFullyConnectedWorkload.hpp"
 #include "workloads/ClL2NormalizationFloatWorkload.hpp"
+#include "workloads/ClLstmFloatWorkload.hpp"
 #include "workloads/ClMultiplicationWorkload.hpp"
-#include "workloads/ClFullyConnectedWorkload.hpp"
+#include "workloads/ClNormalizationFloatWorkload.hpp"
 #include "workloads/ClPadWorkload.hpp"
-#include "workloads/ClPooling2dBaseWorkload.hpp"
 #include "workloads/ClPermuteWorkload.hpp"
-#include "workloads/ClNormalizationFloatWorkload.hpp"
+#include "workloads/ClPooling2dBaseWorkload.hpp"
 #include "workloads/ClSoftmaxBaseWorkload.hpp"
 #include "workloads/ClSubtractionWorkload.hpp"
-#include "workloads/ClLstmFloatWorkload.hpp"
 #endif
 
 using namespace boost;
@@ -59,14 +59,14 @@ bool IsMatchingStride(uint32_t actualStride)
     return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
 };
 
-bool IsClBackendSupported(std::string* reasonIfUnsupported)
+bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
 {
 #if ARMCOMPUTECL_ENABLED
     return true;
 #else
-    if (reasonIfUnsupported != nullptr)
+    if (reasonIfUnsupported)
     {
-        *reasonIfUnsupported = "The armnn library has been built without CL support";
+        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
     }
     return false;
 #endif
 }
@@ -80,13 +80,13 @@ bool IsClBackendSupported(std::string* reasonIfUnsupported)
 
 #if ARMCOMPUTECL_ENABLED
 template<typename FuncType, typename... Args>
-inline bool IsWorkloadSupported(FuncType&& func, std::string* reasonIfUnsupported, Args&&... args)
+inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
 {
     arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
     const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
     if (!supported && reasonIfUnsupported)
     {
-        *reasonIfUnsupported = aclStatus.error_description();
+        reasonIfUnsupported.value() = aclStatus.error_description();
     }
     return supported;
 }
@@ -101,7 +101,7 @@ inline bool IsWorkloadSupported(FuncType&& func, std::string* reasonIfUnsupporte
 } //namespace
 
 template<typename FloatFunc, typename Uint8Func, typename ... Params>
-bool IsSupportedForDataTypeCl(std::string* reasonIfUnsupported,
+bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                               DataType dataType,
                               FloatFunc floatFuncPtr,
                               Uint8Func uint8FuncPtr,
@@ -119,7 +119,7 @@ bool IsSupportedForDataTypeCl(std::string* reasonIfUnsupported,
 bool IsActivationSupportedCl(const TensorInfo& input,
                              const TensorInfo& output,
                              const ActivationDescriptor& descriptor,
-                             std::string* reasonIfUnsupported)
+                             Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                    reasonIfUnsupported,
@@ -131,12 +131,13 @@ bool IsActivationSupportedCl(const TensorInfo& input,
 bool IsAdditionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
-    return FORWARD_CL_LAYER_SUPPORT_FUNC(ClAdditionValidate(input0,
-                                                            input1,
-                                                            output,
-                                                            reasonIfUnsupported));
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
 bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
@@ -146,7 +147,7 @@ bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
                                      const TensorInfo& beta,
                                      const TensorInfo& gamma,
                                      const BatchNormalizationDescriptor& descriptor,
-                                     std::string* reasonIfUnsupported)
+                                     Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                    reasonIfUnsupported,
@@ -160,7 +161,7 @@ bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
 }
 
 bool IsConstantSupportedCl(const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     output.GetDataType(),
@@ -201,10 +202,11 @@ bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convol
     return isSupported;
 }
 
-bool IsDirectConvolution2dParamsSupportedCl(std::string* reasonIfUnsupported,
+bool IsDirectConvolution2dParamsSupportedCl(Optional<std::string&> reasonIfUnsupported,
                                             const Convolution2dDescriptor& parameters,
                                             const TensorInfo& weightInfo)
 {
+    ignore_unused(reasonIfUnsupported);
     return IsClDirectConvolution2dSupported(weightInfo, parameters);
 }
 
@@ -213,7 +215,7 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
                                 const Convolution2dDescriptor& descriptor,
                                 const TensorInfo& weights,
                                 const Optional<TensorInfo>& biases,
-                                std::string* reasonIfUnsupported)
+                                Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                    reasonIfUnsupported,
@@ -229,7 +231,7 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
                                        const DepthwiseConvolution2dDescriptor& descriptor,
                                        const TensorInfo& weights,
                                        const Optional<TensorInfo>& biases,
-                                       std::string* reasonIfUnsupported)
+                                       Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -243,7 +245,7 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
 bool IsDivisionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                    reasonIfUnsupported,
@@ -255,12 +257,14 @@ bool IsDivisionSupportedCl(const TensorInfo& input0,
 bool IsSubtractionSupportedCl(const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
-                              std::string* reasonIfUnsupported)
+                              Optional<std::string&> reasonIfUnsupported)
 {
-    return FORWARD_CL_LAYER_SUPPORT_FUNC(ClSubtractionValidate(input0,
-                                                               input1,
-                                                               output,
-                                                               reasonIfUnsupported));
+
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
 }
 
 bool IsFullyConnectedSupportedCl(const TensorInfo& input,
@@ -268,7 +272,7 @@ bool IsFullyConnectedSupportedCl(const TensorInfo& input,
                                  const TensorInfo& weights,
                                  const TensorInfo& biases,
                                  const FullyConnectedDescriptor& descriptor,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                    reasonIfUnsupported,
@@ -280,7 +284,7 @@ bool IsFullyConnectedSupportedCl(const TensorInfo& input,
 }
 
 bool IsInputSupportedCl(const TensorInfo& input,
-                        std::string* reasonIfUnsupported)
+                        Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     input.GetDataType(),
@@ -291,14 +295,14 @@ bool IsInputSupportedCl(const TensorInfo& input,
 bool IsL2NormalizationSupportedCl(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const L2NormalizationDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported)
+                                  Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
                          const OriginsDescriptor& descriptor,
-                         std::string* reasonIfUnsupported)
+                         Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -310,7 +314,7 @@ bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
 bool IsMultiplicationSupportedCl(const TensorInfo& input0,
                                  const TensorInfo& input1,
                                  const TensorInfo& output,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                    reasonIfUnsupported,
@@ -322,13 +326,13 @@ bool IsMultiplicationSupportedCl(const TensorInfo& input0,
 bool IsNormalizationSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const NormalizationDescriptor& descriptor,
-                                std::string* reasonIfUnsupported)
+                                Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsOutputSupportedCl(const TensorInfo& output,
-                         std::string* reasonIfUnsupported)
+                         Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     output.GetDataType(),
@@ -336,18 +340,10 @@ bool IsOutputSupportedCl(const TensorInfo& output,
                                     &TrueFunc<>);
 }
 
-bool IsPadSupportedCl(const TensorInfo& input,
-                      const TensorInfo& output,
-                      const PadDescriptor& descriptor,
-                      std::string* reasonIfUnsupported)
-{
-    return FORWARD_CL_LAYER_SUPPORT_FUNC(ClPadValidate(input, output, descriptor, reasonIfUnsupported));
-}
-
 bool IsPermuteSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const PermuteDescriptor& descriptor,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(output);
@@ -357,13 +353,13 @@ bool IsPermuteSupportedCl(const TensorInfo& input,
 bool IsPooling2dSupportedCl(const TensorInfo& input,
                             const TensorInfo& output,
                             const Pooling2dDescriptor& descriptor,
-                            std::string* reasonIfUnsupported)
+                            Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
 bool IsResizeBilinearSupportedCl(const TensorInfo& input,
-                                 std::string* reasonIfUnsupported)
+                                 Optional<std::string&> reasonIfUnsupported)
 {
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                     input.GetDataType(),
@@ -374,7 +370,7 @@ bool IsResizeBilinearSupportedCl(const TensorInfo& input,
 bool IsSoftmaxSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const SoftmaxDescriptor& descriptor,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
@@ -382,7 +378,7 @@ bool IsSoftmaxSupportedCl(const TensorInfo& input,
 
 bool IsSplitterSupportedCl(const TensorInfo& input,
                            const ViewsDescriptor& descriptor,
-                           std::string* reasonIfUnsupported)
+                           Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(descriptor);
     return IsSupportedForDataTypeCl(reasonIfUnsupported,
@@ -393,23 +389,25 @@ bool IsSplitterSupportedCl(const TensorInfo& input,
 
 bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
                                    const FakeQuantizationDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported)
+                                   Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
     ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
 bool IsReshapeSupportedCl(const TensorInfo& input,
-                          std::string* reasonIfUnsupported)
+                          Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(input);
+    ignore_unused(reasonIfUnsupported);
     return true;
 }
 
 bool IsFloorSupportedCl(const TensorInfo& input,
                         const TensorInfo& output,
-                        std::string* reasonIfUnsupported)
+                        Optional<std::string&> reasonIfUnsupported)
 {
     ignore_unused(output);
     return IsClBackendSupported(reasonIfUnsupported) &&
@@ -420,59 +418,104 @@ bool IsFloorSupportedCl(const TensorInfo& input,
                                     &FalseFuncU8<>);
 }
 
-bool IsLstmSupportedCl(const TensorInfo& input, const TensorInfo& outputStateIn,
-                       const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                       const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                       const TensorInfo& output, const LstmDescriptor& descriptor,
-                       const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                       const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                       const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                       const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                       const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                       const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                       const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                       const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                       const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported)
-{
-    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate, reasonIfUnsupported,
-                                   input, outputStateIn, cellStateIn, scratchBuffer, outputStateOut, cellStateOut,
-                                   output, descriptor, inputToForgetWeights, inputToCellWeights,
-                                   inputToOutputWeights, recurrentToForgetWeights,
-                                   recurrentToCellWeights, recurrentToOutputWeights,
-                                   forgetGateBias, cellBias, outputGateBias,
-                                   inputToInputWeights, recurrentToInputWeights,
-                                   cellToInputWeights, inputGateBias, projectionWeights,
-                                   projectionBias, cellToForgetWeights, cellToOutputWeights);
+bool IsLstmSupportedCl(const TensorInfo& input,
+                       const TensorInfo& outputStateIn,
+                       const TensorInfo& cellStateIn,
+                       const TensorInfo& scratchBuffer,
+                       const TensorInfo& outputStateOut,
+                       const TensorInfo& cellStateOut,
+                       const TensorInfo& output,
+                       const LstmDescriptor& descriptor,
+                       const TensorInfo& inputToForgetWeights,
+                       const TensorInfo& inputToCellWeights,
+                       const TensorInfo& inputToOutputWeights,
+                       const TensorInfo& recurrentToForgetWeights,
+                       const TensorInfo& recurrentToCellWeights,
+                       const TensorInfo& recurrentToOutputWeights,
+                       const TensorInfo& forgetGateBias,
+                       const TensorInfo& cellBias,
+                       const TensorInfo& outputGateBias,
+                       const TensorInfo* inputToInputWeights,
+                       const TensorInfo* recurrentToInputWeights,
+                       const TensorInfo* cellToInputWeights,
+                       const TensorInfo* inputGateBias,
+                       const TensorInfo* projectionWeights,
+                       const TensorInfo* projectionBias,
+                       const TensorInfo* cellToForgetWeights,
+                       const TensorInfo* cellToOutputWeights,
+                       Optional<std::string&> reasonIfUnsupported)
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   outputStateIn,
+                                   cellStateIn,
+                                   scratchBuffer,
+                                   outputStateOut,
+                                   cellStateOut,
+                                   output,
+                                   descriptor,
+                                   inputToForgetWeights,
+                                   inputToCellWeights,
+                                   inputToOutputWeights,
+                                   recurrentToForgetWeights,
+                                   recurrentToCellWeights,
+                                   recurrentToOutputWeights,
+                                   forgetGateBias,
+                                   cellBias,
+                                   outputGateBias,
+                                   inputToInputWeights,
+                                   recurrentToInputWeights,
+                                   cellToInputWeights,
+                                   inputGateBias,
+                                   projectionWeights,
+                                   projectionBias,
+                                   cellToForgetWeights,
+                                   cellToOutputWeights);
 }
 
 bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
-                                    std::string* reasonIfUnsupported)
+                                    Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
-                                   output,
-                                   reasonIfUnsupported);
+                                   output);
 }
 
 bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
-                                    std::string* reasonIfUnsupported)
+                                    Optional<std::string&> reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                    reasonIfUnsupported,
                                    input,
-                                   output,
-                                   reasonIfUnsupported);
+                                   output);
 }
 
 bool IsMeanSupportedCl(const TensorInfo& input,
                        const TensorInfo& output,
                        const MeanDescriptor& descriptor,
-                       std::string* reasonIfUnsupported)
+                       Optional<std::string&> reasonIfUnsupported)
 {
+    ignore_unused(input);
+    ignore_unused(output);
+    ignore_unused(descriptor);
+    ignore_unused(reasonIfUnsupported);
     return false;
 }
 
+bool IsPadSupportedCl(const TensorInfo& input,
+                      const TensorInfo& output,
+                      const PadDescriptor& descriptor,
+                      Optional<std::string&> reasonIfUnsupported)
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
 }
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 314ac4c73f..5cd756ba1b 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <armnn/DescriptorsFwd.hpp>
+#include <armnn/Optional.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 #include <armnn/ArmNN.hpp>
@@ -18,19 +19,19 @@ class ClLayerSupport : public ILayerSupport
 };
 
 bool IsClDirectConvolution2dSupported(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc);
-bool IsClDepthwiseConvolution2dDescParamsSupported(std::string* reasonIfUnsupported,
+bool IsClDepthwiseConvolution2dDescParamsSupported(Optional<std::string&> reasonIfUnsupported,
                                                    const DepthwiseConvolution2dDescriptor& parameters,
                                                    const TensorInfo& weights);
 
 bool IsActivationSupportedCl(const TensorInfo& input,
                              const TensorInfo& output,
                              const ActivationDescriptor& descriptor,
-                             std::string* reasonIfUnsupported = nullptr);
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsAdditionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
                                      const TensorInfo& output,
@@ -39,130 +40,143 @@ bool IsBatchNormalizationSupportedCl(const TensorInfo& input,
                                      const TensorInfo& beta,
                                      const TensorInfo& gamma,
                                      const BatchNormalizationDescriptor& descriptor,
-                                     std::string* reasonIfUnsupported = nullptr);
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConstantSupportedCl(const TensorInfo& output,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvolution2dSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const Convolution2dDescriptor& descriptor,
                                 const TensorInfo& weights,
                                 const Optional<TensorInfo>& biases,
-                                std::string* reasonIfUnsupported = nullptr);
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const DepthwiseConvolution2dDescriptor& descriptor,
                                        const TensorInfo& weights,
                                        const Optional<TensorInfo>& biases,
-                                       std::string* reasonIfUnsupported = nullptr);
+                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsDivisionSupportedCl(const TensorInfo& input0,
                            const TensorInfo& input1,
                            const TensorInfo& output,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSubtractionSupportedCl(const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
-                              std::string* reasonIfUnsupported = nullptr);
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFullyConnectedSupportedCl(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const TensorInfo& weights,
                                  const TensorInfo& biases,
                                  const FullyConnectedDescriptor& descriptor,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsInputSupportedCl(const TensorInfo& input,
-                        std::string* reasonIfUnsupported = nullptr);
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsL2NormalizationSupportedCl(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const L2NormalizationDescriptor& descriptor,
-                                  std::string* reasonIfUnsupported = nullptr);
-
-bool IsLstmSupportedCl(const TensorInfo& input, const TensorInfo& outputStateIn,
-                       const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
-                       const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
-                       const TensorInfo& output, const LstmDescriptor& descriptor,
-                       const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
-                       const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
-                       const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
-                       const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
-                       const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
-                       const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
-                       const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
-                       const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
-                       const TensorInfo* cellToOutputWeights, std::string* reasonIfUnsupported = nullptr);
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsLstmSupportedCl(const TensorInfo& input,
+                       const TensorInfo& outputStateIn,
+                       const TensorInfo& cellStateIn,
+                       const TensorInfo& scratchBuffer,
+                       const TensorInfo& outputStateOut,
+                       const TensorInfo& cellStateOut,
+                       const TensorInfo& output,
+                       const LstmDescriptor& descriptor,
+                       const TensorInfo& inputToForgetWeights,
+                       const TensorInfo& inputToCellWeights,
+                       const TensorInfo& inputToOutputWeights,
+                       const TensorInfo& recurrentToForgetWeights,
+                       const TensorInfo& recurrentToCellWeights,
+                       const TensorInfo& recurrentToOutputWeights,
+                       const TensorInfo& forgetGateBias,
+                       const TensorInfo& cellBias,
+                       const TensorInfo& outputGateBias,
+                       const TensorInfo* inputToInputWeights,
+                       const TensorInfo* recurrentToInputWeights,
+                       const TensorInfo* cellToInputWeights,
+                       const TensorInfo* inputGateBias,
+                       const TensorInfo* projectionWeights,
+                       const TensorInfo* projectionBias,
+                       const TensorInfo* cellToForgetWeights,
+                       const TensorInfo* cellToOutputWeights,
+                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMergerSupportedCl(const std::vector<const TensorInfo*> inputs,
                          const OriginsDescriptor& descriptor,
-                         std::string* reasonIfUnsupported = nullptr);
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsMultiplicationSupportedCl(const TensorInfo& input0,
                                  const TensorInfo& input1,
                                  const TensorInfo& output,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsNormalizationSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const NormalizationDescriptor& descriptor,
-                                std::string* reasonIfUnsupported = nullptr);
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsOutputSupportedCl(const TensorInfo& output,
-                         std::string* reasonIfUnsupported = nullptr);
-
-bool IsPadSupportedCl(const TensorInfo& input,
-                      const TensorInfo& output,
-                      const PadDescriptor& descriptor,
-                      std::string* reasonIfUnsupported = nullptr);
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPermuteSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const PermuteDescriptor& descriptor,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsPooling2dSupportedCl(const TensorInfo& input,
                             const TensorInfo& output,
                             const Pooling2dDescriptor& descriptor,
-                            std::string* reasonIfUnsupported = nullptr);
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsResizeBilinearSupportedCl(const TensorInfo& input,
-                                 std::string* reasonIfUnsupported = nullptr);
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSoftmaxSupportedCl(const TensorInfo& input,
                           const TensorInfo& output,
                           const SoftmaxDescriptor& descriptor,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsSplitterSupportedCl(const TensorInfo& input,
                            const ViewsDescriptor& descriptor,
-                           std::string* reasonIfUnsupported = nullptr);
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFakeQuantizationSupportedCl(const TensorInfo& input,
                                    const FakeQuantizationDescriptor& descriptor,
-                                   std::string* reasonIfUnsupported = nullptr);
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsReshapeSupportedCl(const TensorInfo& input,
-                          std::string* reasonIfUnsupported = nullptr);
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsFloorSupportedCl(const TensorInfo& input,
                         const TensorInfo& output,
-                        std::string* reasonIfUnsupported = nullptr);
-
-bool IsMeanSupportedCl(const TensorInfo& input,
-                       const TensorInfo& output,
-                       const MeanDescriptor& descriptor,
-                       std::string* reasonIfUnsupported = nullptr);
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
-                                    std::string* reasonIfUnsupported = nullptr);
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
-                                    std::string* reasonIfUnsupported = nullptr);
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsMeanSupportedCl(const TensorInfo& input,
+                       const TensorInfo& output,
+                       const MeanDescriptor& descriptor,
+                       Optional<std::string&> reasonIfUnsupported = EmptyOptional());
+
+bool IsPadSupportedCl(const TensorInfo& input,
+                      const TensorInfo& output,
+                      const PadDescriptor& descriptor,
+                      Optional<std::string&> reasonIfUnsupported = EmptyOptional());
 
 }
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.cpp b/src/backends/cl/workloads/ClAdditionWorkload.cpp
index aa032e872c..ec0dd30275 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.cpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.cpp
@@ -37,10 +37,9 @@ void ClAdditionWorkload::Execute() const
     m_Layer.run();
 }
 
-bool ClAdditionValidate(const TensorInfo& input0,
-                        const TensorInfo& input1,
-                        const TensorInfo& output,
-                        std::string* reasonIfUnsupported)
+arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
+                                       const TensorInfo& input1,
+                                       const TensorInfo& output)
 {
     const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
@@ -51,13 +50,7 @@ bool ClAdditionValidate(const TensorInfo& input0,
                                                                     &aclOutputInfo,
                                                                     g_AclConvertPolicy);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
-    return supported;
+    return aclStatus;
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClAdditionWorkload.hpp b/src/backends/cl/workloads/ClAdditionWorkload.hpp
index 3e4ee26793..c5e6affac8 100644
--- a/src/backends/cl/workloads/ClAdditionWorkload.hpp
+++ b/src/backends/cl/workloads/ClAdditionWorkload.hpp
@@ -24,8 +24,7 @@ private:
     mutable arm_compute::CLArithmeticAddition m_Layer;
 };
 
-bool ClAdditionValidate(const TensorInfo& input0,
-                        const TensorInfo& input1,
-                        const TensorInfo& output,
-                        std::string* reasonIfUnsupported);
+arm_compute::Status ClAdditionValidate(const TensorInfo& input0,
+                                       const TensorInfo& input1,
+                                       const TensorInfo& output);
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
index e7663b4ca4..2c9a0e1fc2 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.cpp
@@ -32,19 +32,15 @@ void ClConvertFp16ToFp32Workload::Execute() const
     m_Layer.run();
 }
 
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported)
+arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
 {
     if (input.GetDataType() != DataType::Float16)
     {
-        *reasonIfUnsupported = "Input should be Float16";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float16");
     }
     if (output.GetDataType() != DataType::Float32)
     {
-        *reasonIfUnsupported = "Output should be Float32";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float32");
     }
 
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
@@ -53,12 +49,6 @@ arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
     const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
         &aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
     return aclStatus;
 }
 
diff --git a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
index b6447488f7..f5f230d869 100644
--- a/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp
@@ -23,8 +23,6 @@ private:
     mutable arm_compute::CLDepthConvertLayer m_Layer;
 };
 
-arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported);
+arm_compute::Status ClConvertFp16ToFp32WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
index 2ae4adc424..6758180a6e 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.cpp
@@ -32,19 +32,15 @@ void ClConvertFp32ToFp16Workload::Execute() const
     m_Layer.run();
 }
 
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported)
+arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output)
 {
     if (input.GetDataType() != DataType::Float32)
     {
-        *reasonIfUnsupported = "Input should be Float32";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Input should be Float32");
     }
     if (output.GetDataType() != DataType::Float16)
     {
-        *reasonIfUnsupported = "Output should be Float16";
-        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, *reasonIfUnsupported);
+        return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR, "Output should be Float16");
     }
 
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
@@ -53,12 +49,6 @@ arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
     const arm_compute::Status aclStatus = arm_compute::CLDepthConvertLayer::validate(
         &aclInputInfo, &aclOutputInfo, g_AclConvertPolicy, 0);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
     return aclStatus;
 }
 
diff --git a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
index 95d19905d7..28d0bfa9b0 100644
--- a/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
+++ b/src/backends/cl/workloads/ClConvertFp32ToFp16Workload.hpp
@@ -23,8 +23,6 @@ private:
     mutable arm_compute::CLDepthConvertLayer m_Layer;
 };
 
-arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        std::string* reasonIfUnsupported);
+arm_compute::Status ClConvertFp32ToFp16WorkloadValidate(const TensorInfo& input, const TensorInfo& output);
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 45dc5e8be7..89b0d8fde9 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -35,10 +35,9 @@ void ClPadWorkload::Execute() const
     m_Layer.run();
 }
 
-bool ClPadValidate(const TensorInfo& input,
-                   const TensorInfo& output,
-                   const PadDescriptor& descriptor,
-                   std::string* reasonIfUnsupported)
+arm_compute::Status ClPadValidate(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const PadDescriptor& descriptor)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
@@ -48,13 +47,7 @@ bool ClPadValidate(const TensorInfo& input,
                                                        &aclOutputInfo,
                                                        padList);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
-    return supported;
+    return aclStatus;
 }
 
 } // namespace armnn
diff --git a/src/backends/cl/workloads/ClPadWorkload.hpp b/src/backends/cl/workloads/ClPadWorkload.hpp
index a7ad6670a7..97f57fdeac 100644
--- a/src/backends/cl/workloads/ClPadWorkload.hpp
+++ b/src/backends/cl/workloads/ClPadWorkload.hpp
@@ -23,10 +23,9 @@ private:
     mutable arm_compute::CLPadLayer m_Layer;
 };
 
-bool ClPadValidate(const TensorInfo& input,
-                   const TensorInfo& output,
-                   const PadDescriptor& descriptor,
-                   std::string* reasonIfUnsupported);
+arm_compute::Status ClPadValidate(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const PadDescriptor& descriptor);
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.cpp b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
index 8efed94293..1967fae354 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.cpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.cpp
@@ -35,10 +35,9 @@ void ClSubtractionWorkload::Execute() const
     m_Layer.run();
 }
 
-bool ClSubtractionValidate(const TensorInfo& input0,
-                           const TensorInfo& input1,
-                           const TensorInfo& output,
-                           std::string* reasonIfUnsupported)
+arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output)
 {
     const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
     const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
@@ -49,13 +48,7 @@ bool ClSubtractionValidate(const TensorInfo& input0,
                                                        &aclOutputInfo,
                                                        g_AclConvertPolicy);
 
-    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
-    if (!supported && reasonIfUnsupported)
-    {
-        *reasonIfUnsupported = aclStatus.error_description();
-    }
-
-    return supported;
+    return aclStatus;
 }
 
 } //namespace armnn
diff --git a/src/backends/cl/workloads/ClSubtractionWorkload.hpp b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
index 7dd608bf8a..3a4210da07 100644
--- a/src/backends/cl/workloads/ClSubtractionWorkload.hpp
+++ b/src/backends/cl/workloads/ClSubtractionWorkload.hpp
@@ -23,8 +23,7 @@ private:
    mutable arm_compute::CLArithmeticSubtraction m_Layer;
 };
 
-bool ClSubtractionValidate(const TensorInfo& input0,
-                           const TensorInfo& input1,
-                           const TensorInfo& output,
-                           std::string* reasonIfUnsupported);
+arm_compute::Status ClSubtractionValidate(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output);
 } //namespace armnn
--
cgit v1.2.1
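
Note on the pattern this commit adopts: an optional reference (armnn::Optional<std::string&>, defaulted to EmptyOptional()) replaces a raw std::string* out-parameter, so call sites never pass or dereference a nullptr. The sketch below is a minimal standalone illustration of that idea, not the armnn sources: OptionalRef and IsFooSupported are invented stand-ins for armnn::Optional<T&>, EmptyOptional() and the IsLayerSupported queries above, written only so the snippet compiles on its own.

// Minimal sketch of the optional-reference out-parameter pattern.
// OptionalRef and IsFooSupported are hypothetical names for illustration.
#include <iostream>
#include <string>

// Bare-bones stand-in for armnn::Optional<T&>: either empty or bound to a T.
template <typename T>
class OptionalRef
{
public:
    OptionalRef() : m_Ptr(nullptr) {}      // empty, like EmptyOptional()
    OptionalRef(T& ref) : m_Ptr(&ref) {}   // bound to a caller-owned object
    explicit operator bool() const { return m_Ptr != nullptr; }
    T& value() const { return *m_Ptr; }    // only valid when bound
private:
    T* m_Ptr;
};

// A query in the style of the patched IsLayerSupported functions: it writes a
// reason only when the caller actually supplied somewhere to put one.
bool IsFooSupported(int width, OptionalRef<std::string> reasonIfUnsupported = {})
{
    const bool supported = (width % 4 == 0);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "width must be a multiple of 4";
    }
    return supported;
}

int main()
{
    std::string reason;
    if (!IsFooSupported(10, reason))       // caller who wants the reason binds a string
    {
        std::cout << "unsupported: " << reason << "\n";
    }
    IsFooSupported(8);                     // others simply omit the argument
}

The design point mirrors the diff above: callers that do not care about the reason rely on the EmptyOptional()-style default, the callee tests the optional before writing through it (as IsClBackendSupported and IsWorkloadSupported now do), and the raw-pointer checks against nullptr disappear.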