From d30093c755b7e4e354eb537272876ec95b0eddc9 Mon Sep 17 00:00:00 2001 From: Ferran Balaguer Date: Tue, 9 Jul 2019 17:04:47 +0100 Subject: IVGCVSW-3477 Refactor android-nn-driver to use armnn ILayerSupported !armnn:1508 Signed-off-by: Ferran Balaguer Change-Id: Ica5fcb683f101bde9e651f0be0f5b9b4c409d1aa --- 1.0/HalPolicy.cpp | 208 +++++++++++++++++++++++++++++++--------------------- 1.1/HalPolicy.cpp | 136 ++++++++++++++++++++-------------- 1.2/HalPolicy.cpp | 99 +++++++++++++++---------- ConversionUtils.cpp | 15 ++-- ConversionUtils.hpp | 162 ++++++++++++++++++++++------------------ 5 files changed, 364 insertions(+), 256 deletions(-) diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp index eb594427..b87727c6 100644 --- a/1.0/HalPolicy.cpp +++ b/1.0/HalPolicy.cpp @@ -117,12 +117,15 @@ bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, Conve const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsAdditionSupported, - data.m_Backends, - input0.GetTensorInfo(), - input1.GetTensorInfo(), - outInfo)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsAdditionSupported, + data.m_Backends, + isSupported, + input0.GetTensorInfo(), + input1.GetTensorInfo(), + outInfo); + if (!isSupported) { return false; } @@ -317,12 +320,16 @@ bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& mo std::vector inputTensorInfos; std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos), [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); }); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsConcatSupported, - data.m_Backends, - inputTensorInfos, - outputInfo, - concatDescriptor)) + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsConcatSupported, + data.m_Backends, + isSupported, + inputTensorInfos, + outputInfo, + concatDescriptor); + if 
(!isSupported) { return false; } @@ -396,11 +403,14 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model outputInfo.SetShape(input.GetTensorInfo().GetShape()); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsDequantizeSupported, - data.m_Backends, - input.GetTensorInfo(), - outputInfo)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsDequantizeSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo); + if (!isSupported) { return false; } @@ -431,11 +441,14 @@ bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, Con return Fail("%s: Operation has invalid outputs", __func__); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsFloorSupported, - data.m_Backends, - input.GetTensorInfo(), - GetTensorInfoForOperand(*outputOperand))) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsFloorSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + GetTensorInfoForOperand(*outputOperand)); + if (!isSupported) { return false; } @@ -499,14 +512,17 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m desc.m_TransposeWeightMatrix = true; desc.m_BiasEnabled = true; - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsFullyConnectedSupported, - data.m_Backends, - reshapedInfo, - outputInfo, - weights.GetInfo(), - bias.GetInfo(), - desc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsFullyConnectedSupported, + data.m_Backends, + isSupported, + reshapedInfo, + outputInfo, + weights.GetInfo(), + bias.GetInfo(), + desc); + if (!isSupported) { return false; } @@ -579,12 +595,15 @@ bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation, // window rather than the radius as in AndroidNN.
descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsNormalizationSupported, - data.m_Backends, - inputInfo, - outputInfo, - descriptor)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsNormalizationSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) { return false; } @@ -898,6 +917,7 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo()); paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo()); + // Optional parameters if(!desc.m_CifgEnabled) { paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo()); @@ -924,18 +944,21 @@ bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, Conv paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo()); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsLstmSupported, - data.m_Backends, - inputInfo, - outputStateInInfo, - cellStateInInfo, - scratchBufferInfo, - outputStateOutInfo, - cellStateOutInfo, - outputInfo, - desc, - paramsInfo)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsLstmSupported, + data.m_Backends, + isSupported, + inputInfo, + outputStateInInfo, + cellStateInInfo, + scratchBufferInfo, + outputStateOutInfo, + cellStateOutInfo, + outputInfo, + desc, + paramsInfo); + if (!isSupported) { return false; } @@ -979,12 +1002,15 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& armnn::L2NormalizationDescriptor desc; desc.m_DataLayout = armnn::DataLayout::NHWC; - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsL2NormalizationSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsL2NormalizationSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); 
+ if (!isSupported) { return false; } @@ -1038,12 +1064,15 @@ bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, Conve const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsMultiplicationSupported, - data.m_Backends, - input0.GetTensorInfo(), - input1.GetTensorInfo(), - outInfo)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsMultiplicationSupported, + data.m_Backends, + isSupported, + input0.GetTensorInfo(), + input1.GetTensorInfo(), + outInfo); + if (!isSupported) { return false; } @@ -1119,12 +1148,15 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C return Fail("%s: Operation has invalid inputs", __func__); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsSoftmaxSupported, - data.m_Backends, - input.GetTensorInfo(), - outputInfo, - desc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSoftmaxSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo, + desc); + if (!isSupported) { return false; } @@ -1175,12 +1207,16 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod } const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsSpaceToDepthSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc)) + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSpaceToDepthSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); + if (!isSupported) { return false; } @@ -1254,11 +1290,14 @@ bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, C reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(), requestedShape.dimensions.data()); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsReshapeSupported, - data.m_Backends, - 
input.GetTensorInfo(), - reshapeDescriptor)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsReshapeSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + reshapeDescriptor); + if (!isSupported) { return false; } @@ -1291,12 +1330,15 @@ bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& m desc.m_Method = armnn::ResizeMethod::Bilinear; desc.m_DataLayout = armnn::DataLayout::NHWC; - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsResizeSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsResizeSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); + if (!isSupported) { return false; } diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp index fbd2e08e..d7f4bbb8 100644 --- a/1.1/HalPolicy.cpp +++ b/1.1/HalPolicy.cpp @@ -129,12 +129,15 @@ bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, Conve const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsDivisionSupported, - data.m_Backends, - input0.GetTensorInfo(), - input1.GetTensorInfo(), - outInfo)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsDivisionSupported, + data.m_Backends, + isSupported, + input0.GetTensorInfo(), + input1.GetTensorInfo(), + outInfo); + if (!isSupported) { return false; } @@ -185,12 +188,15 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve outputInfo.SetShape(InferSubOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape())); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsSubtractionSupported, - data.m_Backends, - input0.GetTensorInfo(), - input1.GetTensorInfo(), - outputInfo)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSubtractionSupported, + data.m_Backends, + isSupported, + 
input0.GetTensorInfo(), + input1.GetTensorInfo(), + outputInfo); + if (!isSupported) { return false; } @@ -263,12 +269,15 @@ bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, Conv const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsMeanSupported, - data.m_Backends, - inputInfo, - outputInfo, - descriptor)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsMeanSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) { return false; } @@ -310,12 +319,15 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList)); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsPadSupported, - data.m_Backends, - inputInfo, - outputInfo, - descriptor)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsPadSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) { return false; } @@ -400,12 +412,16 @@ bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& m } const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsSpaceToBatchNdSupported, - data.m_Backends, - inputInfo, - outputInfo, - descriptor)) + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSpaceToBatchNdSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) { return false; } @@ -477,11 +493,14 @@ bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, C return Fail("%s: Could not read output 0", __func__); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsReshapeSupported, - data.m_Backends, - inputInfo, - reshapeDesc)) + bool isSupported = false; + 
FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsReshapeSupported, + data.m_Backends, + isSupported, + inputInfo, + reshapeDesc); + if (!isSupported) { return false; } @@ -566,12 +585,15 @@ bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& mod } const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsStridedSliceSupported, - data.m_Backends, - inputInfo, - outputInfo, - descriptor)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsStridedSliceSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) { return false; } @@ -639,12 +661,15 @@ bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsPermuteSupported, - data.m_Backends, - inputInfo, - outputInfo, - permuteDesc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsPermuteSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + permuteDesc); + if (!isSupported) { return false; } @@ -706,12 +731,15 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsBatchToSpaceNdSupported, - data.m_Backends, - inputInfo, - outputInfo, - batchToSpaceNdDesc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsBatchToSpaceNdSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + batchToSpaceNdDesc); + if (!isSupported) { return false; } @@ -724,4 +752,4 @@ bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& m } } // namespace hal_1_1 -} // namespace armnn_driver \ No newline at end of file +} // namespace armnn_driver diff --git a/1.2/HalPolicy.cpp 
b/1.2/HalPolicy.cpp index 9ccac9b4..4cac12ad 100644 --- a/1.2/HalPolicy.cpp +++ b/1.2/HalPolicy.cpp @@ -272,14 +272,17 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co desc.m_BiasEnabled = true; armnn::Optional biases(bias.GetInfo()); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsConvolution2dSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsConvolution2dSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + if (!isSupported) { return false; } @@ -428,14 +431,17 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& desc.m_BiasEnabled = true; armnn::Optional biases(bias.GetInfo()); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsDepthwiseConvolutionSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsDepthwiseConvolutionSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + if (!isSupported) { return false; } @@ -578,12 +584,15 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con return Fail("%s: Operation has invalid inputs: type mismatch", __func__); } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsPadSupported, - data.m_Backends, - inputInfo, - outputInfo, - descriptor)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsPadSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) { return false; } @@ -628,12 +637,15 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape())); } - if 
(!IsLayerSupportedForAnyBackend(__func__, - armnn::IsPreluSupported, - data.m_Backends, - inputInfo, - alphaInfo, - outputInfo)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsPreluSupported, + data.m_Backends, + isSupported, + inputInfo, + alphaInfo, + outputInfo); + if (!isSupported) { return false; } @@ -741,12 +753,15 @@ bool HalPolicy::ConvertResize(const Operation& operation, return false; } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsResizeSupported, - data.m_Backends, - inputInfo, - outputInfo, - descriptor)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsResizeSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + if (!isSupported) { return false; } @@ -796,12 +811,16 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod } const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsSpaceToDepthSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc)) + + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSpaceToDepthSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); + if (!isSupported) { return false; } diff --git a/ConversionUtils.cpp b/ConversionUtils.cpp index fb71c759..f513d28f 100644 --- a/ConversionUtils.cpp +++ b/ConversionUtils.cpp @@ -150,12 +150,15 @@ armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo, } } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsActivationSupported, - data.m_Backends, - prevLayer->GetOutputSlot(0).GetTensorInfo(), - tensorInfo, - activationDesc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsActivationSupported, + data.m_Backends, + isSupported, + prevLayer->GetOutputSlot(0).GetTensorInfo(), + tensorInfo, + activationDesc); + if (!isSupported) { return nullptr; } diff --git a/ConversionUtils.hpp 
b/ConversionUtils.hpp index c59da1d5..8eb48fe6 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -8,6 +8,8 @@ #include "Utils.hpp" #include +#include +#include #include "armnn/src/armnnUtils/DataLayoutIndexed.hpp" #include "armnn/src/armnnUtils/Permute.hpp" @@ -118,49 +120,47 @@ static bool Fail(const char* formatStr, Args&&... args) return false; } -// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support. -// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e) -template -bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args) -{ - std::vector unsupportedReason(1024+1); - bool isSupported = f(std::forward(args)..., unsupportedReason.data(), unsupportedReason.size()-1); - if(isSupported) - { - return true; - } - else - { - std::string sUnsupportedReason(unsupportedReason.data()); - if (sUnsupportedReason.size() > 0) - { - ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str()); - } else - { - ALOGD("%s: not supported by armnn", funcName); - } - return false; - } -} - -template -bool IsLayerSupportedForAnyBackend(const char* funcName, - IsLayerSupportedFunc f, - const std::vector& backends, - Args&&... args) -{ - for (auto&& backend : backends) - { - if (IsLayerSupported(funcName, f, backend, std::forward(args)...)) - { - return true; - } +// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support. +// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e) +#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) 
\ + std::string reasonIfUnsupported; \ + try { \ + for (auto&& backendId : backends) \ + { \ + auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \ + if (layerSupportObject) \ + { \ + supported = \ + layerSupportObject->func(__VA_ARGS__, armnn::Optional(reasonIfUnsupported)); \ + if (supported) \ + { \ + break; \ + } \ + else \ + { \ + if (reasonIfUnsupported.size() > 0) \ + { \ + ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \ + } \ + else \ + { \ + ALOGD("%s: not supported by armnn", funcName); \ + } \ + } \ + } \ + else \ + { \ + ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \ + } \ + } \ + if (!supported) \ + { \ + ALOGD("%s: not supported by any specified backend", funcName); \ + } \ + } catch (const armnn::InvalidArgumentException &e) { \ + throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \ } - ALOGD("%s: not supported by any specified backend", funcName); - return false; -} - template armnn::TensorShape GetTensorShapeForOperand(const Operand& operand) { @@ -996,10 +996,13 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation, ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data); if (tensorPin.IsValid()) { - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsConstantSupported, - data.m_Backends, - tensorPin.GetConstTensor().GetInfo())) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsConstantSupported, + data.m_Backends, + isSupported, + tensorPin.GetConstTensor().GetInfo()); + if (!isSupported) { return LayerInputHandle(); } @@ -1150,12 +1153,16 @@ bool ConvertToActivation(const HalOperation& operation, return false; } const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsActivationSupported, - data.m_Backends, - input.GetTensorInfo(), - outInfo, - activationDesc)) + + bool
isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsActivationSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outInfo, + activationDesc); + if (!isSupported) { return false; } @@ -1281,12 +1288,15 @@ bool ConvertPooling2d(const HalOperation& operation, } } - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsPooling2dSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsPooling2dSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); + if (!isSupported) { return false; } @@ -1393,14 +1403,17 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers desc.m_BiasEnabled = true; armnn::Optional biases(bias.GetInfo()); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsConvolution2dSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsConvolution2dSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + if (!isSupported) { return false; } @@ -1550,14 +1563,17 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model desc.m_BiasEnabled = true; armnn::Optional biases(bias.GetInfo()); - if (!IsLayerSupportedForAnyBackend(__func__, - armnn::IsDepthwiseConvolutionSupported, - data.m_Backends, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases)) + bool isSupported = false; + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsDepthwiseConvolutionSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + if (!isSupported) { return false; } -- cgit v1.2.1