author    Teresa Charlin <teresa.charlinreyes@arm.com>    2023-04-26 11:19:03 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>         2023-04-26 15:58:14 +0000
commit    024ef0b460c802a7c841dcba4b7e894e714d4512 (patch)
tree      3a49a270538308a7fd24ae05e805d0acbeac4b26
parent    357add2c4685362afb188feaaa67b90e4d6d2361 (diff)
download  armnn-024ef0b460c802a7c841dcba4b7e894e714d4512.tar.gz
BugFix: calculate explicit padding for Transpose Convolution using output size
* If the output shape is given in Transpose convolution, use it to calculate the padding

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I0bf3dee94c2ce606ed67fb385018b220188c3017
-rw-r--r--  src/armnn/layers/TransposeConvolution2dLayer.cpp  23
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp            99
2 files changed, 88 insertions, 34 deletions
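
The rule the patch applies can be sketched outside Arm NN: for a transpose convolution with SAME padding, outputSize = (inputSize - 1) * stride + filterSize - totalPadding, so when the output size is known the total padding falls out directly. Below is a minimal standalone sketch of that arithmetic for one dimension; CalcExplicitPadding is an illustrative name, not an Arm NN or TfLite API, and it only mirrors the CalcPadding overload added further down.

// Minimal sketch of explicit padding for one dimension of a transpose
// convolution whose output size is known up front. Illustrative helper,
// not part of Arm NN.
#include <cstdint>

void CalcExplicitPadding(uint32_t inputSize,
                         uint32_t filterSize,
                         uint32_t stride,
                         uint32_t outputSize,
                         bool samePadding,      // true for TfLite SAME padding
                         uint32_t& paddingFront,
                         uint32_t& paddingBack)
{
    paddingFront = 0;
    paddingBack  = 0;
    if (samePadding)
    {
        // outputSize = (inputSize - 1) * stride + filterSize - totalPadding,
        // rearranged for totalPadding since outputSize is given.
        const uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
        paddingFront = totalPadding / 2;            // smaller half at the front
        paddingBack  = totalPadding - paddingFront; // remainder at the back
    }
}
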
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index f79c5887fb..534d6b431e 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -98,20 +98,25 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
 
     std::vector<TensorShape> expectedOutputShape;
+    std::vector<TensorShape> outputShapeGivenAsInput;
+
+    expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+                                             m_Weight->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(expectedOutputShape.size() == 1);
+
     // If output_shape was specified then use it rather than calculate an inferred output shape.
     if (m_Param.m_OutputShapeEnabled)
     {
         TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
                                        m_Param.m_OutputShape.data());
-        expectedOutputShape.push_back(shapeAsTensorShape);
-    }
-    else
-    {
-        expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
-                                                 m_Weight->GetTensorInfo().GetShape() });
-    }
+        outputShapeGivenAsInput.push_back(shapeAsTensorShape);
 
-    ARMNN_ASSERT(expectedOutputShape.size() == 1);
+        ARMNN_ASSERT(outputShapeGivenAsInput.size() == 1);
+        ARMNN_ASSERT_MSG(expectedOutputShape == outputShapeGivenAsInput,
+                         "TransposeConvolution2dLayer: output calculated by InferOutputShapes and "
+                         "the output given as an input parameter to the layer are not matching");
+    }
 
     ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
 }
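
Read in isolation, the change above says: always infer the output shape from the input and weight shapes, and when the caller also supplies an explicit output_shape, require the two to agree before validating. A rough standalone sketch of that invariant follows, with std::vector<unsigned int> standing in for armnn::TensorShape; ResolveOutputShape is an illustrative name, not the layer's API, and the inferred shape is passed in directly where the layer would call InferOutputShapes().

// Rough sketch of the consistency check introduced above, using plain vectors
// in place of armnn::TensorShape.
#include <cassert>
#include <vector>

using Shape = std::vector<unsigned int>;

Shape ResolveOutputShape(const Shape& inferredShape,
                         const Shape* explicitShape) // nullptr when no output_shape was given
{
    if (explicitShape != nullptr)
    {
        // An explicitly supplied output shape is only accepted when it agrees
        // with the shape the layer infers from its input and weights.
        assert(inferredShape == *explicitShape &&
               "explicit output shape does not match the inferred output shape");
    }
    return inferredShape;
}

int main()
{
    const Shape inferred {1, 8, 8, 16};
    const Shape given    {1, 8, 8, 16};
    ResolveOutputShape(inferred, &given);  // shapes agree, check passes
    ResolveOutputShape(inferred, nullptr); // no explicit shape, nothing to compare
    return 0;
}
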
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 2a7f049470..244f1fa197 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -411,6 +411,29 @@ void CalcPadding(uint32_t inputSize,
     }
 }
 
+// Function that calculates explicit padding when the output shape is known.
+// At the moment the output is only given as an input parameter in Transpose Convolution,
+// not in Convolution and Depthwise Convolution
+void CalcPadding(uint32_t inputSize,
+                 uint32_t filterSize,
+                 uint32_t stride,
+                 uint32_t dilation,
+                 uint32_t& paddingFront,
+                 uint32_t& paddingBack,
+                 tflite::Padding padding,
+                 uint32_t outputSize)
+{
+    IgnoreUnused(dilation);
+    paddingFront = 0;
+    paddingBack = 0;
+    if (padding == tflite::Padding_SAME)
+    {
+        uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
+        paddingFront = totalPadding / 2;
+        paddingBack = totalPadding - paddingFront;
+    }
+}
+
 armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                                const std::vector<unsigned int>& shape,
                                const bool outputTensor = false)
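
As a quick sanity check of the arithmetic in the new overload, consider one dimension with input width 4, a 3-wide filter, stride 2, and a requested output width of 8; the numbers below are illustrative only.

// Worked example of the SAME-padding arithmetic used by the new CalcPadding
// overload above; the values are illustrative only.
#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t inputSize  = 4;  // input width
    const uint32_t filterSize = 3;  // filter width
    const uint32_t stride     = 2;
    const uint32_t outputSize = 8;  // output width requested via output_shape

    // totalPadding = (4 - 1) * 2 + 3 - 8 = 1
    const uint32_t totalPadding = (inputSize - 1) * stride + filterSize - outputSize;
    const uint32_t paddingFront = totalPadding / 2;
    const uint32_t paddingBack  = totalPadding - paddingFront;

    assert(paddingFront == 0 && paddingBack == 1);
    return 0;
}
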
@@ -1608,6 +1631,17 @@ void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
 
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
+    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
+
+    // TfLite uses NHWC tensors
+    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
+
+    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
+    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
+
     // This block determines the output shape of the transpose convolution. If the output shape tensor ptr is not null
     // And the tensor is a constant, we can access the data at load time and set the output shape of the
     // layer. If this is not constant, We do not have access to the shape data, so we have to use
@@ -1634,32 +1668,47 @@ void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
             desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
         }
         desc.m_OutputShapeEnabled = true;
-    }
-    armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 2);
-    armnn::TensorInfo filterTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
-    // TfLite uses NHWC tensors
-    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
-    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
-
-    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
-    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
-
-    CalcPadding(inputHeight,
-                filterHeight,
-                desc.m_StrideY,
-                1, // DilationY
-                desc.m_PadTop,
-                desc.m_PadBottom,
-                options->padding);
-
-    CalcPadding(inputWidth,
-                filterWidth,
-                desc.m_StrideX,
-                1, // DilationX
-                desc.m_PadLeft,
-                desc.m_PadRight,
-                options->padding);
+        // TfLite uses NHWC tensors
+        const unsigned int outputHeight = desc.m_OutputShape[1];
+        const unsigned int outputWidth = desc.m_OutputShape[2];
+
+        CalcPadding(inputHeight,
+                    filterHeight,
+                    desc.m_StrideY,
+                    1, // DilationY
+                    desc.m_PadTop,
+                    desc.m_PadBottom,
+                    options->padding,
+                    outputHeight);
+
+        CalcPadding(inputWidth,
+                    filterWidth,
+                    desc.m_StrideX,
+                    1, // DilationX
+                    desc.m_PadLeft,
+                    desc.m_PadRight,
+                    options->padding,
+                    outputWidth);
+    }
+    else
+    {
+        CalcPadding(inputHeight,
+                    filterHeight,
+                    desc.m_StrideY,
+                    1, // DilationY
+                    desc.m_PadTop,
+                    desc.m_PadBottom,
+                    options->padding);
+
+        CalcPadding(inputWidth,
+                    filterWidth,
+                    desc.m_StrideX,
+                    1, // DilationX
+                    desc.m_PadLeft,
+                    desc.m_PadRight,
+                    options->padding);
+    }
 
     auto filterTensorAndData = CreateConstTensorNonPermuted(inputs[1], filterTensorInfo, inputTensorInfo.GetDataType());