From 573a8fa04ef0774fe44a4ac6669582a6bf753d6e Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Tue, 23 Jul 2019 14:01:37 +0100
Subject: IVGCVSW-3553 Fix failing zero_sized tests

Signed-off-by: Aron Virginas-Tar
Change-Id: Idd10f34babc0d2552d599872b853ba5fb5c98351
---
 1.0/HalPolicy.cpp    |  9 +++++----
 1.1/HalPolicy.cpp    |  5 +++--
 1.2/HalPolicy.cpp    | 17 +++++++++--------
 ConversionUtils.hpp  |  5 +++++
 OutputShapeUtils.cpp |  7 +------
 OutputShapeUtils.hpp |  4 ----
 Utils.cpp            |  6 ++++++
 Utils.hpp            |  3 +++
 8 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/1.0/HalPolicy.cpp b/1.0/HalPolicy.cpp
index 8c617001..a2c8252a 100644
--- a/1.0/HalPolicy.cpp
+++ b/1.0/HalPolicy.cpp
@@ -9,6 +9,7 @@
 
 #include "FullyConnected.hpp"
 #include "OutputShapeUtils.hpp"
+#include "Utils.hpp"
 
 namespace armnn_driver
 {
@@ -397,7 +398,7 @@ bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from input");
         outputInfo.SetShape(input.GetTensorInfo().GetShape());
@@ -477,7 +478,7 @@ bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& m
 
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(inputInfo.GetShape());
@@ -1004,7 +1005,7 @@ bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model&
 
     const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
    {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(inputInfo.GetShape());
@@ -1147,7 +1148,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from input");
         outputInfo.SetShape(input.GetTensorInfo().GetShape());
diff --git a/1.1/HalPolicy.cpp b/1.1/HalPolicy.cpp
index d7f4bbb8..6687b123 100644
--- a/1.1/HalPolicy.cpp
+++ b/1.1/HalPolicy.cpp
@@ -6,6 +6,7 @@
 #include "HalPolicy.hpp"
 
 #include "OutputShapeUtils.hpp"
+#include "Utils.hpp"
 
 #include "../1.0/HalPolicy.hpp"
 
@@ -182,7 +183,7 @@ bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, Conve
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferSubOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
@@ -313,7 +314,7 @@ bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, Conve
     }
 
     armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
-    if (IsDynamicOutput(outputInfo))
+    if (IsDynamicTensor(outputInfo))
     {
         ALOGD("Output shape not set, will infer from inputs");
         outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 3c00388c..f93629ef 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -6,6 +6,7 @@
 #include "HalPolicy.hpp"
 
 #include "OutputShapeUtils.hpp"
"OutputShapeUtils.hpp" +#include "Utils.hpp" #include "../1.0/HalPolicy.hpp" #include "../1.1/HalPolicy.hpp" @@ -270,7 +271,7 @@ bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, Co desc.m_BiasEnabled = true; armnn::Optional biases(bias.GetInfo()); - if (IsDynamicOutput(outputInfo)) + if (IsDynamicTensor(outputInfo)) { try { @@ -450,7 +451,7 @@ bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& armnn::Optional biases(bias.GetInfo()); armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicOutput(outputInfo)) + if (IsDynamicTensor(outputInfo)) { try { @@ -522,7 +523,7 @@ bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, C } armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicOutput(outInfo)) + if (IsDynamicTensor(outInfo)) { ALOGD("Output shape not set, will infer from inputs"); outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape())); @@ -571,7 +572,7 @@ bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, C } armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicOutput(outputInfo)) + if (IsDynamicTensor(outputInfo)) { ALOGD("Output shape not set, will infer from inputs"); outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(), @@ -628,7 +629,7 @@ bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, Con } armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicOutput(outputInfo)) + if (IsDynamicTensor(outputInfo)) { ALOGD("Output shape not set, will infer from inputs"); outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList)); @@ -726,7 +727,7 @@ bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, Con const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo(); armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output); - if (IsDynamicOutput(outputInfo)) + if (IsDynamicTensor(outputInfo)) { ALOGD("Output shape not set, will infer from inputs"); outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape())); @@ -848,7 +849,7 @@ bool HalPolicy::ConvertResize(const Operation& operation, return false; } - if (IsDynamicOutput(outputInfo)) + if (IsDynamicTensor(outputInfo)) { try { @@ -961,7 +962,7 @@ bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, C } armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand); - if (IsDynamicOutput(outputInfo)) + if (IsDynamicTensor(outputInfo)) { ALOGD("Output shape not set, will infer from input"); outputInfo.SetShape(input.GetTensorInfo().GetShape()); diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp index 8eb48fe6..755e3bef 100644 --- a/ConversionUtils.hpp +++ b/ConversionUtils.hpp @@ -974,6 +974,11 @@ LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation, try { armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand); + if (IsDynamicTensor(operandTensorInfo)) + { + Fail("%s: dynamic input tensors are not supported", __func__); + return LayerInputHandle(); + } switch (operand->lifetime) { diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp index e3812a36..0c897d11 100644 --- a/OutputShapeUtils.cpp +++ b/OutputShapeUtils.cpp @@ -96,11 +96,6 @@ namespace armnn_driver using namespace armnn; -bool IsDynamicOutput(const TensorInfo& outputInfo) -{ - return outputInfo.GetNumElements() 
-}
-
 TensorShape InferConvolution2dOutputShape(const TensorShape& inputShape,
                                           const TensorShape& kernelShape,
                                           const Convolution2dDescriptor& descriptor)
@@ -177,4 +172,4 @@ TensorShape InferSubOutputShape(const TensorShape& input0Shape, const TensorShap
     return CalculateMaxShape(input0Shape, input1Shape);
 }
 
-} // namespace armnn_driver
\ No newline at end of file
+} // namespace armnn_driver
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index 7452ced9..222c1235 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -10,8 +10,6 @@
 namespace armnn_driver
 {
 
-bool IsDynamicOutput(const armnn::TensorInfo& outputInfo);
-
 armnn::TensorShape InferConvolution2dOutputShape(const armnn::TensorShape& inputShape,
                                                  const armnn::TensorShape& kernelShape,
                                                  const armnn::Convolution2dDescriptor& descriptor);
@@ -37,5 +35,3 @@ armnn::TensorShape InferResizeOutputShape(const armnn::TensorShape& inputShape,
 armnn::TensorShape InferSubOutputShape(const armnn::TensorShape& input0Shape, const armnn::TensorShape& input1Shape);
 
 } // namespace armnn_driver
-
-
diff --git a/Utils.cpp b/Utils.cpp
index c3c6310b..d3d62a02 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -328,4 +328,10 @@ void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
     profiler->Print(fileStream);
 }
 
+bool IsDynamicTensor(const armnn::TensorInfo& outputInfo)
+{
+    // Dynamic tensors have at least one 0-sized dimension
+    return outputInfo.GetNumElements() == 0u;
+}
+
 } // namespace armnn_driver
diff --git a/Utils.hpp b/Utils.hpp
index 5aac4716..267e519e 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -144,4 +144,7 @@ void ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwor
     }
 }
 
+/// Checks if a tensor info represents a dynamic tensor
+bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);
+
 } // namespace armnn_driver
-- 
cgit v1.2.1
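
Note (outside the patch): the snippet below is a minimal, self-contained sketch of the zero-sized-dimension rule that this change centralises as IsDynamicTensor() in Utils.cpp/Utils.hpp. GetNumElements here is a hypothetical stand-in for armnn::TensorInfo::GetNumElements(), and the shape is modelled as a plain std::vector<std::uint32_t>, so the example builds without the Arm NN headers; it is an illustration of the rule, not driver code.

// Standalone sketch -- NOT part of the patch above.
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Stand-in for armnn::TensorInfo::GetNumElements(): product of all dimension sizes.
std::uint32_t GetNumElements(const std::vector<std::uint32_t>& dims)
{
    return std::accumulate(dims.begin(), dims.end(), std::uint32_t{1},
                           std::multiplies<std::uint32_t>());
}

// Same rule as the patched helper: any zero-sized dimension collapses the
// element count to 0, which is how the zero_sized tests mark dynamic tensors.
bool IsDynamicTensor(const std::vector<std::uint32_t>& dims)
{
    return GetNumElements(dims) == 0u;
}

int main()
{
    std::cout << IsDynamicTensor({1, 2, 2, 8}) << '\n'; // prints 0: fully specified shape
    std::cout << IsDynamicTensor({0, 2, 2, 8}) << '\n'; // prints 1: zero-sized, i.e. dynamic
    return 0;
}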