From 024ef0b460c802a7c841dcba4b7e894e714d4512 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Wed, 26 Apr 2023 11:19:03 +0100
Subject: BugFix: calculate explicit padding for Transpose Convolution using
 output size

* If the output shape is given in Transpose convolution, use it to calculate the padding

Signed-off-by: Teresa Charlin
Change-Id: I0bf3dee94c2ce606ed67fb385018b220188c3017
---
 src/armnn/layers/TransposeConvolution2dLayer.cpp | 23 +++---
 src/armnnTfLiteParser/TfLiteParser.cpp           | 99 ++++++++++++++++++------
 2 files changed, 88 insertions(+), 34 deletions(-)

diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index f79c5887fb..534d6b431e 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -98,20 +98,25 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
 
     std::vector<TensorShape> expectedOutputShape;
+    std::vector<TensorShape> outputShapeGivenAsInput;
+
+    expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+                                             m_Weight->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(expectedOutputShape.size() == 1);
+
     // If output_shape was specified then use it rather than calculate an inferred output shape.
     if (m_Param.m_OutputShapeEnabled)
     {
         TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
                                        m_Param.m_OutputShape.data());
-        expectedOutputShape.push_back(shapeAsTensorShape);
-    }
-    else
-    {
-        expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
-                                                 m_Weight->GetTensorInfo().GetShape() });
-    }
+        outputShapeGivenAsInput.push_back(shapeAsTensorShape);
 
-    ARMNN_ASSERT(expectedOutputShape.size() == 1);
+        ARMNN_ASSERT(outputShapeGivenAsInput.size() == 1);
+        ARMNN_ASSERT_MSG(expectedOutputShape == outputShapeGivenAsInput,
+                         "TransposeConvolution2dLayer: output calculated by InferOutputShapes and "
+                         "the output given as an input parameter to the layer are not matching");
+    }
 
     ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
 }
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 2a7f049470..244f1fa197 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -411,6 +411,29 @@ void CalcPadding(uint32_t inputSize,
     }
 }
 
+// Function that calculates explicit padding when the output shape is known.
+// At the moment the output is only given as an input parameter in Transpose Convolution,
+// not in Convolution and Depthwise Convolution
+void CalcPadding(uint32_t inputSize,
+                 uint32_t filterSize,
+                 uint32_t stride,
+                 uint32_t dilation,
+                 uint32_t& paddingFront,
+                 uint32_t& paddingBack,
+                 tflite::Padding padding,
+                 uint32_t outputSize)
+{
+    IgnoreUnused(dilation);
+    paddingFront = 0;
+    paddingBack = 0;
+    if (padding == tflite::Padding_SAME)
+    {
+        uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
+        paddingFront = totalPadding / 2;
+        paddingBack = totalPadding - paddingFront;
+    }
+}
+
 armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                                const std::vector<unsigned int>& shape,
                                const bool outputTensor = false)
@@ -1608,6 +1631,17 @@ void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorI
 
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 2);
+    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
+
+    // TfLite uses NHWC tensors
+    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
+
+    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];
+
     // This block determines the output shape of the transpose convolution. If the output shape tensor ptr is not null
     // And the tensor is a constant, we can access the data at load time and set the output shape of the
     // layer. If this is not constant, We do not have access to the shape data, so we have to use
@@ -1634,32 +1668,47 @@ void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorI
             desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
         }
         desc.m_OutputShapeEnabled = true;
-    }
-    armnn::TensorInfo inputTensorInfo  = InputTensorInfo(subgraphIndex, operatorIndex, 2);
-    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
 
-    // TfLite uses NHWC tensors
-    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
-    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
-
-    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
-    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];
-
-    CalcPadding(inputHeight,
-                filterHeight,
-                desc.m_StrideY,
-                1, // DilationY
-                desc.m_PadTop,
-                desc.m_PadBottom,
-                options->padding);
-
-    CalcPadding(inputWidth,
-                filterWidth,
-                desc.m_StrideX,
-                1, // DilationX
-                desc.m_PadLeft,
-                desc.m_PadRight,
-                options->padding);
+        // TfLite uses NHWC tensors
+        const unsigned int outputHeight = desc.m_OutputShape[1];
+        const unsigned int outputWidth  = desc.m_OutputShape[2];
+
+        CalcPadding(inputHeight,
+                    filterHeight,
+                    desc.m_StrideY,
+                    1, // DilationY
+                    desc.m_PadTop,
+                    desc.m_PadBottom,
+                    options->padding,
+                    outputHeight);
+
+        CalcPadding(inputWidth,
+                    filterWidth,
+                    desc.m_StrideX,
+                    1, // DilationX
+                    desc.m_PadLeft,
+                    desc.m_PadRight,
+                    options->padding,
+                    outputWidth);
+    }
+    else
+    {
+        CalcPadding(inputHeight,
+                    filterHeight,
+                    desc.m_StrideY,
+                    1, // DilationY
+                    desc.m_PadTop,
+                    desc.m_PadBottom,
+                    options->padding);
+
+        CalcPadding(inputWidth,
+                    filterWidth,
+                    desc.m_StrideX,
+                    1, // DilationX
+                    desc.m_PadLeft,
+                    desc.m_PadRight,
+                    options->padding);
+    }
 
     auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo,
                                                             inputTensorInfo.GetDataType());
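
Note (reviewer illustration, not part of the patch): the sketch below is a minimal, standalone
rendering of the arithmetic the new CalcPadding overload applies when SAME padding is requested
and the output size is known. The helper name CalcTransposePadding, the plain bool used in place
of tflite::Padding, and the example sizes are assumptions made only to keep the snippet
self-contained and compilable outside the parser.

    #include <cstdint>
    #include <iostream>

    // Explicit SAME padding for a transpose convolution when the output size is known.
    // Mirrors the arithmetic of the new CalcPadding overload:
    //   totalPadding = (inputSize - 1) * stride + filterSize - outputSize
    void CalcTransposePadding(uint32_t inputSize, uint32_t filterSize, uint32_t stride,
                              uint32_t outputSize, bool samePadding,
                              uint32_t& paddingFront, uint32_t& paddingBack)
    {
        paddingFront = 0;
        paddingBack  = 0;
        if (samePadding)
        {
            uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
            paddingFront = totalPadding / 2;            // smaller half at the front/top
            paddingBack  = totalPadding - paddingFront; // remainder at the back/bottom
        }
    }

    int main()
    {
        // Hypothetical sizes: 4x4 input, 3x3 filter, stride 2, requested 8x8 output (NHWC spatial dims).
        uint32_t padTop    = 0;
        uint32_t padBottom = 0;
        CalcTransposePadding(4, 3, 2, 8, /*samePadding=*/true, padTop, padBottom);
        std::cout << padTop << ", " << padBottom << std::endl; // prints "0, 1"
        return 0;
    }

For this configuration the total padding works out to (4 - 1) * 2 + 3 - 8 = 1, split as 0 at the
top and 1 at the bottom, which is the same split the patched parser would store in the
TransposeConvolution2dDescriptor pad fields.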