From ad1ab53f2898862e82f9b354853764fdcd1df97d Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Thu, 25 Jul 2019 11:24:42 +0100
Subject: IVGCVSW-3569 Fix conversion of HAL1.2 SpaceToDepth

Signed-off-by: Aron Virginas-Tar
Change-Id: I82e38c8a9e44e773c099e347f8ce0070bb5f8662
---
 1.2/HalPolicy.cpp    | 24 +++++++++++++++++++++---
 OutputShapeUtils.cpp | 24 ++++++++++++++++++++++++
 OutputShapeUtils.hpp |  3 +++
 3 files changed, 48 insertions(+), 3 deletions(-)

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 5f327c20..4ef7ea4f 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -44,7 +44,6 @@ bool HandledByV1_0(V1_2::OperationType operationType)
         case V1_0::OperationType::MUL:
         case V1_0::OperationType::RESHAPE:
         case V1_0::OperationType::RNN:
-        case V1_0::OperationType::SPACE_TO_DEPTH:
         case V1_0::OperationType::SVDF:
         case V1_0::OperationType::OEM_OPERATION:
             return true;
@@ -159,6 +158,8 @@ bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model,
             return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
         case V1_2::OperationType::SOFTMAX:
             return ConvertSoftmax(operation, model, data);
+        case V1_2::OperationType::SPACE_TO_DEPTH:
+            return ConvertSpaceToDepth(operation, model, data);
         case V1_2::OperationType::TANH:
             return ConvertTanH(operation, model, data);
         default:
@@ -966,7 +967,19 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
         return Fail("%s: Could not read output 0", __func__);
     }
 
-    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        try
+        {
+            ALOGD("Output shape not set, will infer from inputs");
+            outputInfo.SetShape(InferSpaceToDepthOutputShape(inputInfo.GetShape(), desc));
+        }
+        catch (armnn::Exception& e)
+        {
+            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
+        }
+    }
 
     bool isSupported = false;
     FORWARD_LAYER_SUPPORT_FUNC(__func__,
@@ -985,7 +998,12 @@ bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& mod
     assert(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
-    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+    return SetupAndTrackLayerOutputSlot(operation,
+                                        0,
+                                        *layer,
+                                        model,
+                                        data,
+                                        armnn::Optional(outputInfo));
 }
 
 bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp
index 0c897d11..ecec0b92 100644
--- a/OutputShapeUtils.cpp
+++ b/OutputShapeUtils.cpp
@@ -8,6 +8,7 @@
 #include
 #include
+#include
 #include
 
 namespace
 {
@@ -167,6 +168,29 @@ TensorShape InferResizeOutputShape(const TensorShape& inputShape, const ResizeDe
     return outputShape;
 }
 
+TensorShape InferSpaceToDepthOutputShape(const TensorShape& inputShape, const SpaceToDepthDescriptor& descriptor)
+{
+    TensorShape outputShape(inputShape);
+
+    armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
+
+    const unsigned int cIndex = dataLayoutIndexed.GetChannelsIndex();
+    const unsigned int wIndex = dataLayoutIndexed.GetWidthIndex();
+    const unsigned int hIndex = dataLayoutIndexed.GetHeightIndex();
+
+    if (descriptor.m_BlockSize == 0)
+    {
+        throw InvalidArgumentException("Block size must be greater than zero");
+    }
+
+    outputShape[cIndex] = inputShape[cIndex] * descriptor.m_BlockSize * descriptor.m_BlockSize;
+
+    outputShape[hIndex] = inputShape[hIndex] / descriptor.m_BlockSize;
+    outputShape[wIndex] = inputShape[wIndex] / descriptor.m_BlockSize;
+
+    return outputShape;
+}
+
 TensorShape InferSubOutputShape(const TensorShape& input0Shape, const TensorShape& input1Shape)
 {
     return CalculateMaxShape(input0Shape, input1Shape);
diff --git a/OutputShapeUtils.hpp b/OutputShapeUtils.hpp
index 222c1235..85cafbf6 100644
--- a/OutputShapeUtils.hpp
+++ b/OutputShapeUtils.hpp
@@ -32,6 +32,9 @@ armnn::TensorShape InferPreluOutputShape(const armnn::TensorShape& inputShape, c
 armnn::TensorShape InferResizeOutputShape(const armnn::TensorShape& inputShape,
                                           const armnn::ResizeDescriptor& descriptor);
 
+armnn::TensorShape InferSpaceToDepthOutputShape(const armnn::TensorShape& inputShape,
+                                                const armnn::SpaceToDepthDescriptor& descriptor);
+
 armnn::TensorShape InferSubOutputShape(const armnn::TensorShape& input0Shape, const armnn::TensorShape& input1Shape);
 
 } // namespace armnn_driver
--
cgit v1.2.1
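
For reference, the shape arithmetic that the new InferSpaceToDepthOutputShape performs can be exercised in isolation. The sketch below is illustrative only and is not part of the patch: it assumes an NHWC layout and substitutes a plain std::array for armnn::TensorShape, and the helper name InferSpaceToDepthShapeNhwc is made up for this example.

    // Standalone sketch of the SPACE_TO_DEPTH output-shape arithmetic:
    // height and width shrink by the block size, channels grow by blockSize^2.
    #include <array>
    #include <cstdio>
    #include <stdexcept>

    std::array<unsigned int, 4> InferSpaceToDepthShapeNhwc(const std::array<unsigned int, 4>& input,
                                                           unsigned int blockSize)
    {
        if (blockSize == 0)
        {
            throw std::invalid_argument("Block size must be greater than zero");
        }

        std::array<unsigned int, 4> output = input;    // N stays the same
        output[1] = input[1] / blockSize;              // H / blockSize
        output[2] = input[2] / blockSize;              // W / blockSize
        output[3] = input[3] * blockSize * blockSize;  // C * blockSize^2
        return output;
    }

    int main()
    {
        // Example: a 1x4x4x1 NHWC tensor with block size 2 becomes 1x2x2x4,
        // the same result the patch infers for a dynamic output tensor.
        const std::array<unsigned int, 4> in = { 1, 4, 4, 1 };
        const std::array<unsigned int, 4> out = InferSpaceToDepthShapeNhwc(in, 2);
        std::printf("[%u, %u, %u, %u]\n", out[0], out[1], out[2], out[3]);
        return 0;
    }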