From 4bd9a745df49bdf11e03f932af6eca6b61ddb0a1 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Wed, 12 Aug 2020 12:58:50 +0100
Subject: IVGCVSW-5182 Update Convert functions to use ShapeInferenceMethod. 1/2.

* ConvertToActivation
* ConvertAdd
* ConvertArgMinMax
* ConvertConv2d
* ConvertDepthToSpace
* ConvertDepthwiseConv2d
* ConvertDiv
* ConvertFloor
* ConvertFullyConnected
* ConvertL2Normalization
* ConvertLocalResponseNormalization
* ConvertMean
* ConvertMul
* ConvertPad
* ConvertReshape
* ConvertSub
* ConvertStridedSlice
* ConvertTranspose
* ConvertBatchToSpaceNd
* ConvertSpaceToBatchNd
* ConvertComparison_1_2
* ConvertConv2d_1_2
* ConvertDepthwiseConv2d_1_2
* ConvertElementwiseUnary
* ConvertExpandDims
* ConvertGather
* ConvertGroupedConv2d
* ConvertInstanceNormalization
* ConvertLogSoftmax
* ConvertMaximum
* ConvertMinimum
* ConvertPadV2
* ConvertPrelu
* ConvertQuantize
* ConvertResize
* ConvertSpaceToDepth
* ConvertSoftmax
* ConvertTransposeConv2d

Signed-off-by: Finn Williams
Signed-off-by: Teresa Charlin
Signed-off-by: Kevin May
Change-Id: Idacf16e5eab56d83fce293570bbc89381ae056dc
---
 ArmnnPreparedModel_1_3.cpp |  10 +-
 ConversionUtils.hpp        | 648 ++++++++++++++++++++++++++++-----------------
 ConversionUtils_1_2.hpp    | 611 ++++++++++++++++++++++++++----------------
 ConversionUtils_1_3.hpp    |   5 +-
 Utils.cpp                  |   5 +
 5 files changed, 806 insertions(+), 473 deletions(-)

diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index a27c7a39..386cc174 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -451,6 +451,8 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForOu
         return V1_3::ErrorStatus::GENERAL_FAILURE;
     }
 
+    const size_t outputSize = outputTensorInfo.GetNumBytes();
+
     unsigned int count = 0;
     std::for_each(outputArg.dimensions.begin(), outputArg.dimensions.end(), [&](auto dim)
     {
@@ -466,14 +468,13 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForOu
         count++;
     });
 
-    const size_t outputSize = outputTensorInfo.GetNumBytes();
-
     outputs.emplace_back(i, outputTensor);
     outputShapes[i] = ComputeShape(outputTensorInfo);
 
     if (outputArg.location.length < outputSize)
    {
-        ALOGW("ArmnnPreparedModel_1_3::Execute failed");
+        ALOGW("ArmnnPreparedModel_1_3::Execute failed outputArg.location.length (%s) < outputSize (%s)",
+              std::to_string(outputArg.location.length).c_str(), std::to_string(outputSize).c_str());
         outputShapes[i].isSufficient = false;
         return V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
     }
@@ -481,7 +482,8 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForOu
     const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getHidlMemory().size();
     if (bufferSize < outputSize)
     {
-        ALOGW("ArmnnPreparedModel_1_3::Execute failed");
+        ALOGW("ArmnnPreparedModel_1_3::Execute failed bufferSize (%s) < outputSize (%s)",
+              std::to_string(bufferSize).c_str(), std::to_string(outputSize).c_str());
         outputShapes[i].isSufficient = false;
         return V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
     }
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 474d1a58..f2f95ac8 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT // @@ -1395,12 +1395,30 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation, armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex); + if (overrideOutputInfo == nullptr) + { + outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand)); + } + else + { + outputSlot.SetTensorInfo(*overrideOutputInfo); + } + + // Type one dynamic tensors require the previous layer's output shape for inference + if (!layer.GetInputSlot(0).GetConnection() && + IsDynamicTensor(outputSlot.GetTensorInfo())) + { + return false; + } + bool isSupported = false; if (validateFunc && layer.GetInputSlot(0).GetConnection() && IsDynamicTensor(outputSlot.GetTensorInfo())) { + // IsTensorInfoSet will infer the dynamic output shape outputSlot.IsTensorInfoSet(); + // Once the shape is inferred we can validate it validateFunc(outputSlot.GetTensorInfo(), isSupported); if(!isSupported) @@ -1413,17 +1431,6 @@ bool SetupAndTrackLayerOutputSlot(const HalOperation& operation, return false; } } - else - { - if (overrideOutputInfo == nullptr) - { - outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand)); - } - else - { - outputSlot.SetTensorInfo(*overrideOutputInfo); - } - } const uint32_t operandIndex = operation.outputs[operationOutputIndex]; data.m_OutputSlotForOperand[operandIndex] = &outputSlot; @@ -1810,19 +1817,28 @@ bool ConvertAdd(const HalOperation& operation, const HalModel& model, Conversion const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo(); const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicTensor(outputInfo)) + + bool isSupported = false; + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) { - return Fail("%s: Dynamic output tensors are not supported", __func__); + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsAdditionSupported, + data.m_Backends, + isSupported, + inputInfo0, + inputInfo1, + outputInfo); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); } - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsAdditionSupported, - data.m_Backends, - isSupported, - inputInfo0, - inputInfo1, - outputInfo); if (!isSupported) { return false; @@ -1839,7 +1855,7 @@ bool ConvertAdd(const HalOperation& operation, const HalModel& model, Conversion return false; } - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } else { @@ -1962,7 +1978,6 @@ bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, return Fail("%s: Operation has no outputs", __func__); } - armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand); armnn::TensorShape outputShape = outputInfo.GetShape(); @@ -2247,11 +2262,6 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - // ArmNN does not currently support non-fixed weights or bias const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); @@ -2310,15 +2320,28 @@ bool 
ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers armnn::Optional biases(bias.GetInfo()); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsConvolution2dSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsConvolution2dSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + if (!isSupported) { return false; @@ -2341,7 +2364,7 @@ bool ConvertConv2d(const HalOperation& operation, const HalModel& model, Convers input.Connect(startLayer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } templateGetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 1, model); @@ -2524,15 +2551,29 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model armnn::Optional biases(bias.GetInfo()); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsDepthwiseConvolutionSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsDepthwiseConvolutionSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + + if (!isSupported) { return false; @@ -2553,7 +2594,7 @@ bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model input.Connect(startLayer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } templateGetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } return Fail("%s: ProcessActivation failed", __func__); } @@ -2691,18 +2750,27 @@ bool ConvertFloor(const HalOperation& operation, const HalModel& model, Conversi } const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicTensor(outputInfo)) + + bool isSupported = false; + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) { - return Fail("%s: Dynamic output tensors are not supported", __func__); + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsFloorSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); } - bool isSupported = 
false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsFloorSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - outputInfo); if (!isSupported) { return false; @@ -2712,7 +2780,7 @@ bool ConvertFloor(const HalOperation& operation, const HalModel& model, Conversi assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } inline bool IsQSymm8(const V1_0::Operand&) @@ -2891,11 +2959,6 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin(operation, model, data, 1); ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D @@ -2944,7 +3007,9 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, } bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, IsFullyConnectedSupported, data.m_Backends, isSupported, @@ -2953,6 +3018,17 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, weights.GetInfo(), bias.GetInfo(), desc); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + if (!isSupported) { return false; @@ -2980,7 +3056,7 @@ bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, input.Connect(startLayer->GetInputSlot(0)); } - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } else { @@ -3015,10 +3091,6 @@ bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model const armnn::TensorInfo& inputInfo = input.GetTensorInfo(); const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } if (outputInfo.GetNumDimensions() != 4u) { return Fail("%s: Tensor Rank other than 4 is not supported", __func__); @@ -3028,13 +3100,26 @@ bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model desc.m_DataLayout = armnn::DataLayout::NHWC; bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsL2NormalizationSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - desc); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsL2NormalizationSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + if (!isSupported) { return false; @@ -3044,7 +3129,7 @@ bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); 
+ return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templateGetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 1, model); if (!axisOperand) @@ -3194,13 +3284,26 @@ bool ConvertMean(const HalOperation& operation, const HalModel& model, Conversio descriptor.m_KeepDims = keepDims > 0; bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsMeanSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsMeanSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + if (!isSupported) { return false; @@ -3210,7 +3313,7 @@ bool ConvertMean(const HalOperation& operation, const HalModel& model, Conversio assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } else { @@ -3323,19 +3435,28 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& } const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) + + bool isSupported = false; + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) { - return Fail("%s: Dynamic output tensors are not supported", __func__); + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsPadSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); } - bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsPadSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); if (!isSupported) { return false; @@ -3344,9 +3465,8 @@ bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor); assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - layer->GetOutputSlot(0).SetTensorInfo(outputInfo); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 0, model, data); if (!input.IsValid()) { @@ -3405,14 +3519,29 @@ bool ConvertReshape(const HalOperation& operation, const HalModel& model, Conver reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(), requestedShape.dimensions.data()); + const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand); + bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsReshapeSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - GetTensorInfoForOperand(*outputOperand), - reshapeDescriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, 
bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsReshapeSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo, + reshapeDescriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + if (!isSupported) { return false; @@ -3422,7 +3551,7 @@ bool ConvertReshape(const HalOperation& operation, const HalModel& model, Conver assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } return Fail("%s: ProcessActivation failed", __func__); @@ -3517,7 +3655,6 @@ bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, Conver { return Fail("%s: Could not read output 0", __func__); } - if (IsDynamicTensor(GetTensorInfoForOperand(*output))) { return Fail("%s: Dynamic output tensors are not supported", __func__); @@ -3567,6 +3704,7 @@ bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, Conver inputInfo, outputInfo, reshapeDesc); + if (!isSupported) { return false; @@ -3606,10 +3744,6 @@ bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, C } const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } const HalOperand* beginOperand = GetInputOperand(operation, 1, model); const HalOperand* endOperand = GetInputOperand(operation, 2, model); @@ -3663,13 +3797,26 @@ bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, C } bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsStridedSliceSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsStridedSliceSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + }; + + if(IsDynamicTensor(outputInfo)) + { + isSupported = AreDynamicTensorsSupported(); + } + else + { + validateFunc(outputInfo, isSupported); + } + if (!isSupported) { return false; @@ -3704,7 +3851,7 @@ bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, C assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templateGetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 1, model); if (!blockOperand) @@ -3849,13 +4000,27 @@ bool ConvertBatchToSpaceNd(const HalOperation& operation, batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}}; bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsBatchToSpaceNdSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - batchToSpaceNdDesc); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsBatchToSpaceNdSupported, 
+ data.m_Backends, + isSupported, + inputInfo, + outputInfo, + batchToSpaceNdDesc); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + + if (!isSupported) { return false; @@ -3865,7 +4030,7 @@ bool ConvertBatchToSpaceNd(const HalOperation& operation, assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 1, model); const HalOperand* paddingsOperand = GetInputOperand(operation, 2, model); @@ -3955,13 +4116,26 @@ bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, } bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsSpaceToBatchNdSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSpaceToBatchNdSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + }; + + if(IsDynamicTensor(outputInfo)) + { + isSupported = AreDynamicTensorsSupported(); + } + else + { + validateFunc(outputInfo, isSupported); + } + if (!isSupported) { return false; @@ -3971,7 +4145,7 @@ bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } } // namespace armnn_driver diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp index 824a8f4a..0f47ad31 100644 --- a/ConversionUtils_1_2.hpp +++ b/ConversionUtils_1_2.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2020 Arm Ltd. All rights reserved. +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
// SPDX-License-Identifier: MIT // @@ -138,22 +138,30 @@ bool ConvertComparison_1_2(const HalOperation& operation, const TensorInfo& inputInfo1 = input1.GetTensorInfo(); const TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - ComparisonDescriptor descriptor(comparisonOperation); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsComparisonSupported, - data.m_Backends, - isSupported, - inputInfo0, - inputInfo1, - outputInfo, - descriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsComparisonSupported, + data.m_Backends, + isSupported, + inputInfo0, + inputInfo1, + outputInfo, + descriptor); + + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } if (!isSupported) { @@ -169,7 +177,10 @@ bool ConvertComparison_1_2(const HalOperation& operation, return false; } - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + input0.Connect(layer->GetInputSlot(0)); + input1.Connect(layer->GetInputSlot(1)); + + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template biases(bias.GetInfo()); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsConvolution2dSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsConvolution2dSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } if (!isSupported) { @@ -329,7 +347,7 @@ bool ConvertConv2d_1_2(const HalOperation& operation, const HalModel& model, Con input.Connect(startLayer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } template(operation, 1, model); @@ -476,15 +489,27 @@ bool ConvertDepthwiseConv2d_1_2(const HalOperation& operation, const HalModel& m Optional biases(bias.GetInfo()); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsDepthwiseConvolutionSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - desc, - weights.GetInfo(), - biases); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsDepthwiseConvolutionSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc, + weights.GetInfo(), + biases); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } if (!isSupported) { @@ -556,21 +581,29 @@ bool ConvertElementwiseUnary(const HalOperation& operation, const TensorInfo& inputInfo = input.GetTensorInfo(); const TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } - ElementwiseUnaryDescriptor descriptor(unaryOperation); bool isSupported = false; - 
FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsElementwiseUnarySupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); + + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsElementwiseUnarySupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } if (!isSupported) { @@ -579,10 +612,9 @@ bool ConvertElementwiseUnary(const HalOperation& operation, IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor); assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 1, HalOperandType::INT32, axis, model, data)) @@ -640,13 +668,25 @@ bool ConvertExpandDims(const HalOperation& operation, const HalModel& model, Con reshapeDescriptor.m_TargetShape = targetShape; bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsReshapeSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - outputInfo, - reshapeDescriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsReshapeSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo, + reshapeDescriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } if (!isSupported) { @@ -657,7 +697,7 @@ bool ConvertExpandDims(const HalOperation& operation, const HalModel& model, Con assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templateGetInputSlot(0)); indices.Connect(layer->GetInputSlot(1)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(numGroups * channelMultiplier, &groupOutputInfo), - outputInfo, - concatDescriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsConcatSupported, + data.m_Backends, + isSupported, + std::vector(numGroups * channelMultiplier, &groupOutputInfo), + outputInfo, + concatDescriptor); + }; + + if(!IsDynamicTensor(outputInfo)) + { + validateFunc(outputInfo, isSupported); + } + else + { + isSupported = AreDynamicTensorsSupported(); + } + if (!isSupported) { return false; @@ -1072,7 +1130,7 @@ bool ConvertGroupedConv2d(const HalOperation& operation, const HalModel& model, return Fail("%s: ProcessActivation failed", __func__); } - return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data, nullptr, validateFunc); } template(operation, 4, model, data); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsInstanceNormalizationSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - outputInfo, - desc); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + 
FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsInstanceNormalizationSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo, + desc); + }; + + if(IsDynamicTensor(outputInfo)) + { + isSupported = AreDynamicTensorsSupported(); + } + else + { + validateFunc(outputInfo, isSupported); + } + if (!isSupported) { return false; @@ -1162,7 +1229,7 @@ bool ConvertInstanceNormalization(const HalOperation& operation, const HalModel& IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templateGetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templateAddPadLayer(descriptor); assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - layer->GetOutputSlot(0).SetTensorInfo(outputInfo); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templateGetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 3, model, data); @@ -1906,13 +2018,25 @@ bool ConvertResize(const HalOperation& operation, descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsResizeSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - descriptor); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsResizeSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + descriptor); + }; + + if(IsDynamicTensor(outputInfo)) + { + isSupported = AreDynamicTensorsSupported(); + } + else + { + validateFunc(outputInfo, isSupported); + } if (!isSupported) { @@ -1920,12 +2044,10 @@ bool ConvertResize(const HalOperation& operation, } IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor); - assert(layer != nullptr); - input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } template(operation, 2, model, data); bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsSpaceToDepthSupported, - data.m_Backends, - isSupported, - inputInfo, - outputInfo, - desc); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSpaceToDepthSupported, + data.m_Backends, + isSupported, + inputInfo, + outputInfo, + desc); + }; + + if(IsDynamicTensor(outputInfo)) + { + isSupported = AreDynamicTensorsSupported(); + } + else + { + 
validateFunc(outputInfo, isSupported); + } + if (!isSupported) { return false; @@ -1991,7 +2122,7 @@ bool ConvertSpaceToDepth(const HalOperation& operation, const HalModel& model, C assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templatetype; @@ -2056,13 +2183,26 @@ bool ConvertSoftmax(const HalOperation& operation, const HalModel& model, Conver } bool isSupported = false; - FORWARD_LAYER_SUPPORT_FUNC(__func__, - IsSoftmaxSupported, - data.m_Backends, - isSupported, - input.GetTensorInfo(), - outputInfo, - desc); + auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC(__func__, + IsSoftmaxSupported, + data.m_Backends, + isSupported, + input.GetTensorInfo(), + outputInfo, + desc); + }; + + if(IsDynamicTensor(outputInfo)) + { + isSupported = AreDynamicTensorsSupported(); + } + else + { + validateFunc(outputInfo, isSupported); + } + if (!isSupported) { return false; @@ -2072,7 +2212,7 @@ bool ConvertSoftmax(const HalOperation& operation, const HalModel& model, Conver assert(layer != nullptr); input.Connect(layer->GetInputSlot(0)); - return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data); + return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc); } templateGetInputSlot(1)); cellStateIn.Connect(layer->GetInputSlot(2)); - return ( (IsDynamicTensor(scratchBufferInfo)? SetupAndTrackLayerOutputSlotAndOverrideTensorInfo( @@ -2521,10 +2662,6 @@ bool ConvertTransposeConv2d(const HalOperation& operation, const HalModel& model const TensorInfo& inputInfo = input.GetTensorInfo(); const TensorInfo& outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicTensor(outputInfo)) - { - return Fail("%s: Dynamic output tensors are not supported", __func__); - } // ArmNN does not currently support non-fixed weights or bias // Find the shape of the weights tensor. 
In AndroidNN this will be [ 1, H, W, I * M ]
@@ -2658,15 +2795,27 @@ bool ConvertTransposeConv2d(const HalOperation& operation, const HalModel& model
     Optional<TensorInfo> biases(bias.GetInfo());
     bool isSupported = false;
-    FORWARD_LAYER_SUPPORT_FUNC(__func__,
-                               IsTransposeConvolution2dSupported,
-                               data.m_Backends,
-                               isSupported,
-                               inputInfo,
-                               outputInfo,
-                               desc,
-                               weights.GetInfo(),
-                               biases);
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsTransposeConvolution2dSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc,
+                                   weights.GetInfo(),
+                                   biases);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
     if (!isSupported)
     {
         return false;
     }
@@ -2687,7 +2836,7 @@ bool ConvertTransposeConv2d(const HalOperation& operation, const HalModel& model
 
     input.Connect(startLayer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
+    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data, nullptr, validateFunc);
 }
 
 } // armnn_driver namespace
\ No newline at end of file
diff --git a/ConversionUtils_1_3.hpp b/ConversionUtils_1_3.hpp
index d5d89df1..e6961253 100644
--- a/ConversionUtils_1_3.hpp
+++ b/ConversionUtils_1_3.hpp
@@ -146,7 +146,6 @@ bool ConvertFill(const HalOperation& operation, const HalModel& model, Conversio
     IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
-    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
 }
@@ -667,6 +666,10 @@ bool ConvertRank(const HalOperation& operation, const HalModel& model, Conversio
     }
 
     armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
 
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
diff --git a/Utils.cpp b/Utils.cpp
index db1b6e68..77575d70 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -577,6 +577,11 @@ bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
     {
         return true;
     }
+    // Account for the usage of the TensorShape empty constructor
+    if (tensorInfo.GetNumDimensions() == 0)
+    {
+        return true;
+    }
     return !tensorInfo.GetShape().AreAllDimensionsSpecified();
 }
--
cgit v1.2.1
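
Every conversion touched by this patch follows the same shape: the FORWARD_LAYER_SUPPORT_FUNC
query is wrapped in a validateFunc lambda, run immediately for statically shaped outputs, deferred
for dynamic ones, and then handed to SetupAndTrackLayerOutputSlot so it can be re-run after shape
inference. The following is a minimal, self-contained sketch of that pattern, not part of the
patch: TensorInfo, IsDynamicTensor, AreDynamicTensorsSupported and ConvertSomething are simplified
stand-ins for the driver's real armnn types, macros and helpers.

// Sketch of the deferred-validation pattern this commit applies to each Convert function.
#include <iostream>

struct TensorInfo { bool shapeSpecified; };  // stand-in for armnn::TensorInfo

bool IsDynamicTensor(const TensorInfo& info) { return !info.shapeSpecified; }
bool AreDynamicTensorsSupported() { return true; }  // stand-in: driver supports dynamic tensors

bool ConvertSomething(const TensorInfo& inputInfo, const TensorInfo& outputInfo)
{
    bool isSupported = false;

    // The backend-support query is captured in a lambda instead of being run
    // inline (stand-in for the FORWARD_LAYER_SUPPORT_FUNC call).
    auto validateFunc = [&](const TensorInfo& out, bool& supported)
    {
        supported = out.shapeSpecified && inputInfo.shapeSpecified;
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);      // static output shape: validate now
    }
    else
    {
        isSupported = AreDynamicTensorsSupported(); // dynamic output shape: defer validation
    }

    if (!isSupported)
    {
        return false;
    }

    // The real code then creates and connects the layer and passes validateFunc
    // to SetupAndTrackLayerOutputSlot, which re-runs it once the output shape
    // has been inferred from the connected input slot.
    return true;
}

int main()
{
    std::cout << ConvertSomething({true}, {false}) << '\n'; // dynamic output -> validation deferred
}

Deferring the support query this way is what lets the converters accept dynamic output tensors:
SetupAndTrackLayerOutputSlot triggers shape inference via IsTensorInfoSet() and only then calls
validateFunc against the inferred shape, instead of failing up front with "Dynamic output tensors
are not supported".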