diff options
author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-07-15 14:29:09 +0100 |
---|---|---|
committer | Áron Virginás-Tar <aron.virginas-tar@arm.com> | 2019-07-16 13:31:26 +0000 |
commit | 2b173126319343e49d1f081cfb58eacd96afc715 (patch) | |
tree | b51eaf9d648cb93753c6adc4a075dcb6aea3a68e /OutputShapeUtils.cpp | |
parent | d759323d159a50298af937dfb2c519025efe3900 (diff) | |
download | android-nn-driver-2b173126319343e49d1f081cfb58eacd96afc715.tar.gz |
IVGCVSW-3452 Support dynamic output shape in hal_1_2::HalPolicy::ConvertConv2d
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I8694e1f1c62da6f74eb356558b17a63758ccfdad
Diffstat (limited to 'OutputShapeUtils.cpp')
-rw-r--r-- | OutputShapeUtils.cpp | 44 |
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/OutputShapeUtils.cpp b/OutputShapeUtils.cpp index b6cdb316..6a9bf90c 100644 --- a/OutputShapeUtils.cpp +++ b/OutputShapeUtils.cpp @@ -5,6 +5,8 @@ #include "OutputShapeUtils.hpp" +#include <DataLayoutIndexed.hpp> + #include <algorithm> #include <vector> @@ -54,6 +56,48 @@ bool IsDynamicOutput(const TensorInfo& outputInfo) return outputInfo.GetNumElements() == 0u; } +TensorShape InferConvolution2dOutputShape(const TensorShape& inputShape, + const TensorShape& kernelShape, + const Convolution2dDescriptor& descriptor) +{ + if (inputShape.GetNumDimensions() != 4) + { + throw InvalidArgumentException("Input shape for Convolution2d must be 4D"); + } + + armnnUtils::DataLayoutIndexed dataLayoutIndex(descriptor.m_DataLayout); + + const unsigned int cIndex = dataLayoutIndex.GetChannelsIndex(); + const unsigned int wIndex = dataLayoutIndex.GetWidthIndex(); + const unsigned int hIndex = dataLayoutIndex.GetHeightIndex(); + + const unsigned int wInput = inputShape[wIndex]; + const unsigned int hInput = inputShape[hIndex]; + + const unsigned int wKernel = kernelShape[wIndex]; + const unsigned int wDilated = wKernel + (descriptor.m_DilationX - 1) * (wKernel - 1); + + const unsigned int wRead = (wInput + descriptor.m_PadLeft + descriptor.m_PadRight) - wDilated; + const unsigned int wOutput = 1 + (wRead / descriptor.m_StrideX); + + const unsigned int hKernel = kernelShape[hIndex]; + const unsigned int hDilated = hKernel + (descriptor.m_DilationY - 1) * (hKernel - 1); + + const unsigned int hRead = (hInput + descriptor.m_PadTop + descriptor.m_PadBottom) - hDilated; + const unsigned int hOutput = 1 + (hRead / descriptor.m_StrideY); + + const unsigned int batches = inputShape[0]; + const unsigned int channels = kernelShape[0]; + + TensorShape outputShape(4); + outputShape[0] = batches; + outputShape[cIndex] = channels; + outputShape[wIndex] = wOutput; + outputShape[hIndex] = hOutput; + + return outputShape; +} + TensorShape InferMaximumOutputShape(const armnn::TensorShape& 
input0Shape, const armnn::TensorShape& input1Shape) { |