From aed08ac9b985766a6ce442695ff1742defb0a189 Mon Sep 17 00:00:00 2001
From: Kevin May <kevin.may@arm.com>
Date: Thu, 12 Dec 2019 16:33:31 +0000
Subject: IVGCVSW-4262 Add Calls to IsReshapeSupported and IsPermuteSupported

!armnn:2643

* Add calls before addition of these layers in ConvertConcatenation
* Add outputInfo parameter wherever needed for IsReshapeSupported

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: Ic5d142ea046161960ff2fc137bd261ebb4e6ac0c
---
 1.2/HalPolicy.cpp   |  7 ++--
 ConversionUtils.hpp | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 98 insertions(+), 8 deletions(-)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 61daeef5..bf4525aa 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -749,6 +749,7 @@ bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model
                                data.m_Backends,
                                isSupported,
                                input.GetTensorInfo(),
+                               outputInfo,
                                reshapeDescriptor);
 
     if (!isSupported)
@@ -1346,7 +1347,7 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C
     IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
     assert(layer != nullptr);
 
-    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    bool isReshapeSupported = BroadcastTensor(input0, input1, outInfo, layer, data);
     if (!isReshapeSupported)
     {
         return false;
@@ -1401,7 +1402,7 @@ bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, C
     IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
     assert(layer != nullptr);
 
-    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, layer, data);
     if (!isReshapeSupported)
     {
         return false;
@@ -1564,7 +1565,7 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con
         return Fail("%s: AddPreluLayer failed", __func__);
     }
 
-    bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
+    bool isReshapeSupported = BroadcastTensor(input, alpha, outputInfo, layer, data);
     if (!isReshapeSupported)
     {
         return false;
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 72a668f7..b3f1ac72 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -241,6 +241,7 @@ armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
 
 bool BroadcastTensor(LayerInputHandle& input0,
                      LayerInputHandle& input1,
+                     const armnn::TensorInfo& outputInfo,
                      armnn::IConnectableLayer* startLayer,
                      ConversionData& data)
 {
@@ -292,6 +293,7 @@ bool BroadcastTensor(LayerInputHandle& input0,
                                data.m_Backends,
                                isSupported,
                                reshapedInfo,
+                               outputInfo,
                                reshapeDescriptor);
     if (!isSupported)
     {
@@ -560,6 +562,41 @@ void SwizzleInputs(armnn::INetwork& network,
     }
 }
 
+bool CheckReshapeSupported(ConversionData& data,
+                           std::vector<LayerInputHandle>& inputs,
+                           std::vector<armnn::TensorShape>& inputShapes,
+                           const armnn::PermutationVector& mapping,
+                           const armnn::TensorInfo& outputInfo)
+{
+    if (!mapping.IsEqual(IdentityPermutation4D))
+    {
+        size_t nInputs = inputs.size();
+        for (size_t i=0; i<nInputs; ++i)
+        {
+            // check permute layer
+            armnn::PermuteDescriptor permuteDesc;
+            permuteDesc.m_DimMappings = mapping;
+
+            bool isSupported = false;
+            FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                       IsPermuteSupported,
+                                       data.m_Backends,
+                                       isSupported,
+                                       inputs[i].GetTensorInfo(),
+                                       outputInfo,
+                                       permuteDesc);
+            if (!isSupported)
+            {
+                return false;
+            }
+
+        }
+        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
+    }
+
+    return true;
+}
+
 bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                        int32_t & concatDimension,
                                        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
@@ -1557,7 +1594,7 @@ bool ConvertAdd(const HalOperation& operation, const HalModel& model, Conversion
 
     if (endLayer != nullptr)
     {
-        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+        bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
         if (!isReshapeSupported)
         {
             return false;
@@ -1742,6 +1779,22 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
             tensorDimensionsAdded = 2;
         }
 
+        armnn::ReshapeDescriptor reshapeDescriptor;
+        reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
+
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReshapeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   operandInputHandle.GetTensorInfo(),
+                                   reshapeInfo,
+                                   reshapeDescriptor);
+        if (!isSupported)
+        {
+            return false;
+        }
+
         armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                 *data.m_Network,
                 operandInputHandle,
@@ -1797,7 +1850,10 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
 
     // this is no-op for identity swizzles, otherwise it replaces both
     // the handles and shapes with the swizzled layer output handles and shapes
-    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);
+    if (!CheckReshapeSupported(data, inputHandles, inputShapes, permutationPair.first, outputInfo))
+    {
+        return false;
+    }
 
     // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
     armnn::OriginsDescriptor concatDescriptor;
@@ -1853,6 +1909,21 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
 
     if (needPermute)
     {
+        armnn::PermuteDescriptor permuteDesc;
+        permuteDesc.m_DimMappings = permutationPair.second;
+
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsPermuteSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   layer->GetOutputSlot(0).GetTensorInfo(),
+                                   outputInfo,
+                                   permuteDesc);
+        if (!isSupported)
+        {
+            return false;
+        }
         // Add permutation layer and connect the output to it, the permutation becomes the output layer
         armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                    layer->GetOutputSlot(0),
@@ -1875,6 +1946,22 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model,
             afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
         }
 
+        armnn::ReshapeDescriptor reshapeDescriptor;
+        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
+
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReshapeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   layer->GetOutputSlot(0).GetTensorInfo(),
+                                   afterConcatInfo,
+                                   reshapeDescriptor);
+        if (!isSupported)
+        {
+            return false;
+        }
+
         layer = &AddReshapeLayer(
                 *data.m_Network,
                 layer->GetOutputSlot(0),
@@ -2321,7 +2408,7 @@ bool ConvertDiv(const HalOperation& operation, const HalModel& model, Conversion
 
     if (endLayer)
     {
-        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+        bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
         if (!isReshapeSupported)
         {
             return false;
@@ -2922,7 +3009,7 @@ bool ConvertMul(const HalOperation& operation, const HalModel& model, Conversion
 
     if (endLayer != nullptr)
    {
-        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+        bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
         if (!isReshapeSupported)
         {
             return false;
@@ -3061,6 +3148,7 @@ bool ConvertReshape(const HalOperation& operation, const HalModel& model, Conver
                                data.m_Backends,
                                isSupported,
                                input.GetTensorInfo(),
+                               GetTensorInfoForOperand(*outputOperand),
                                reshapeDescriptor);
     if (!isSupported)
     {
@@ -3130,7 +3218,7 @@ bool ConvertSub(const HalOperation& operation, const HalModel& model, Conversion
 
     if (endLayer)
    {
-        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+        bool isReshapeSupported = BroadcastTensor(input0, input1, outputInfo, startLayer, data);
         if (!isReshapeSupported)
        {
             return false;
@@ -3214,6 +3302,7 @@ bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, Conver
                                data.m_Backends,
                                isSupported,
                                inputInfo,
+                               outputInfo,
                                reshapeDesc);
     if (!isSupported)
     {
-- 
cgit v1.2.1
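
Editorial note (not part of the patch): every site touched above follows the same
shape: build the layer descriptor, query backend support through
FORWARD_LAYER_SUPPORT_FUNC, and only then add the layer to the network. The sketch
below condenses that pattern into a single helper, assuming the declarations in
ConversionUtils.hpp. ConversionData, LayerInputHandle, FORWARD_LAYER_SUPPORT_FUNC,
IsReshapeSupported and AddReshapeLayer are the driver's own utilities seen in the
diff; the wrapper name TryAddReshapeLayer is hypothetical and does not exist in the
tree.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical helper illustrating the guard pattern this patch applies at
// each reshape site; intended to live alongside the other utilities in
// ConversionUtils.hpp, not a definitive implementation.
armnn::IConnectableLayer* TryAddReshapeLayer(ConversionData& data,
                                             LayerInputHandle& input,
                                             const armnn::TensorInfo& outputInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = outputInfo.GetShape();

    // Ask each candidate backend whether it can execute this reshape. After
    // this patch the output TensorInfo is passed alongside the input, so the
    // backend validates both ends of the operation, not just the input shape.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        // Reject the conversion up front rather than building a network
        // containing a layer no backend can run.
        return nullptr;
    }

    return &AddReshapeLayer(*data.m_Network, input, outputInfo);
}

The benefit of routing every such site through a support check is that an
unsupported Reshape or Permute fails the model conversion immediately, instead of
surfacing later when the finished network is optimized or executed.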