From 0ad3ef15b7b731e9b722123f8763b2f1e3783cb8 Mon Sep 17 00:00:00 2001
From: Colm Donelan
Date: Fri, 3 Jul 2020 15:54:28 +0100
Subject: IVGCVSW-4988 Add handling output shape parameter to TransposeConvolution2d

* Add m_OutputShape and m_OutputShapeEnabled to TransposeConvolution2dDescriptor.
* Update TfLite parser to populate m_OutputShape if found in the model.
  Handle both Signed32 from tflite files and QAsymmU8 from test fixtures.
* Update TransposeConvolution2dLayer to use m_OutputShape instead of
  InferOutputShapes if specified.

Signed-off-by: Colm Donelan
Change-Id: Ia6933065375eb8006c916f1ca67c38dc50bc205c
---
 include/armnn/Descriptors.hpp                    | 40 ++++++++++++++----------
 src/armnn/layers/TransposeConvolution2dLayer.cpp | 20 +++++++++---
 src/armnnTfLiteParser/TfLiteParser.cpp           | 22 +++++++++++++
 3 files changed, 60 insertions(+), 22 deletions(-)

diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 60aa219638..241b23d4ed 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -1203,37 +1203,43 @@ struct TransposeConvolution2dDescriptor
         m_StrideX(0),
         m_StrideY(0),
         m_BiasEnabled(false),
-        m_DataLayout(DataLayout::NCHW)
+        m_DataLayout(DataLayout::NCHW),
+        m_OutputShapeEnabled(false)
     {}
 
     bool operator ==(const TransposeConvolution2dDescriptor& rhs) const
     {
-        return m_PadLeft == rhs.m_PadLeft &&
-               m_PadRight == rhs.m_PadRight &&
-               m_PadTop == rhs.m_PadTop &&
-               m_PadBottom == rhs.m_PadBottom &&
-               m_StrideX == rhs.m_StrideX &&
-               m_StrideY == rhs.m_StrideY &&
-               m_BiasEnabled == rhs.m_BiasEnabled &&
-               m_DataLayout == rhs.m_DataLayout;
+        return m_PadLeft            == rhs.m_PadLeft &&
+               m_PadRight           == rhs.m_PadRight &&
+               m_PadTop             == rhs.m_PadTop &&
+               m_PadBottom          == rhs.m_PadBottom &&
+               m_StrideX            == rhs.m_StrideX &&
+               m_StrideY            == rhs.m_StrideY &&
+               m_BiasEnabled        == rhs.m_BiasEnabled &&
+               m_DataLayout         == rhs.m_DataLayout &&
+               m_OutputShapeEnabled == rhs.m_OutputShapeEnabled &&
+               m_OutputShape        == rhs.m_OutputShape;
     }
 
     /// Padding left value in the width dimension.
-    uint32_t   m_PadLeft;
+    uint32_t                  m_PadLeft;
     /// Padding right value in the width dimension.
-    uint32_t   m_PadRight;
+    uint32_t                  m_PadRight;
     /// Padding top value in the height dimension.
-    uint32_t   m_PadTop;
+    uint32_t                  m_PadTop;
     /// Padding bottom value in the height dimension.
-    uint32_t   m_PadBottom;
+    uint32_t                  m_PadBottom;
     /// Stride value when proceeding through input for the width dimension.
-    uint32_t   m_StrideX;
+    uint32_t                  m_StrideX;
     /// Stride value when proceeding through input for the height dimension.
-    uint32_t   m_StrideY;
+    uint32_t                  m_StrideY;
     /// Enable/disable bias.
-    bool       m_BiasEnabled;
+    bool                      m_BiasEnabled;
     /// The data layout to be used (NCHW, NHWC).
-    DataLayout m_DataLayout;
+    DataLayout                m_DataLayout;
+    /// Output shape if it has been specified.
+    bool                      m_OutputShapeEnabled;
+    std::vector<unsigned int> m_OutputShape;
 };
 
 /// A TransposeDescriptor for the TransposeLayer.
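The two new fields are meant to be filled in by whoever builds the network: the TfLite parser further down, or a caller constructing an INetwork by hand. A minimal sketch of the latter follows (not part of this patch); the helper name, the stride values and the {1, 8, 8, 16} shape are illustrative, and the INetwork and weight ConstTensor are assumed to already exist:

    #include <armnn/ArmNN.hpp>
    #include <armnn/Optional.hpp>

    // Sketch: add a transpose convolution whose output shape is supplied up front,
    // so the layer validates against it instead of calling InferOutputShapes().
    armnn::IConnectableLayer* AddExplicitShapeTransposeConv(armnn::INetwork& network,
                                                            const armnn::ConstTensor& weights)
    {
        armnn::TransposeConvolution2dDescriptor descriptor;
        descriptor.m_StrideX     = 2;                       // illustrative values
        descriptor.m_StrideY     = 2;
        descriptor.m_BiasEnabled = false;
        descriptor.m_DataLayout  = armnn::DataLayout::NHWC;

        // New fields introduced by this change.
        descriptor.m_OutputShapeEnabled = true;
        descriptor.m_OutputShape        = { 1u, 8u, 8u, 16u }; // N, H, W, C

        return network.AddTransposeConvolution2dLayer(descriptor, weights,
                                                      armnn::EmptyOptional(), "transposeConv");
    }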
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index ffe92bbbd2..8a264253e0 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -111,16 +111,26 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceM
 
     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
 
-    auto inferredShapes = InferOutputShapes({
-        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
-        m_Weight->GetTensorInfo().GetShape() });
+    std::vector<TensorShape> expectedOutputShape;
+    // If output_shape was specified then use it rather than calculate an inferred output shape.
+    if (m_Param.m_OutputShapeEnabled)
+    {
+        TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
+                                       m_Param.m_OutputShape.data());
+        expectedOutputShape.push_back(shapeAsTensorShape);
+    }
+    else
+    {
+        expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+                                                 m_Weight->GetTensorInfo().GetShape() });
+    }
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(expectedOutputShape.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
         GetOutputSlot(0).GetTensorInfo().GetShape(),
-        inferredShapes[0]);
+        expectedOutputShape[0]);
 }
 
 Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index bad2504f18..1b93aadc5b 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1082,6 +1082,28 @@ void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
+    if (inputs[0])
+    {
+        armnn::TensorInfo tensorInfo = ToTensorInfo(inputs[0]);
+        std::vector<int> output_shape(tensorInfo.GetNumElements());
+        if (tensorInfo.GetDataType() == DataType::Signed32)
+        {
+            ::memcpy(output_shape.data(), GetBuffer(m_Model, inputs[0]->buffer)->data.data(), tensorInfo.GetNumBytes());
+        }
+        if (tensorInfo.GetDataType() == DataType::QAsymmU8)
+        {
+            for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
+            {
+                output_shape[i] = GetBuffer(m_Model, inputs[0]->buffer)->data.data()[i];
+            }
+        }
+        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
+        for (int dimension : output_shape)
+        {
+            desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
+        }
+        desc.m_OutputShapeEnabled = true;
+    }
 
     armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[2]);
     armnn::TensorInfo filterTensorInfo = ToTensorInfo(inputs[1]);
-- 
cgit v1.2.1
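For reference, TRANSPOSE_CONV in a TfLite file carries its output shape as a small 1-D constant tensor, read above either as Signed32 (real models) or QAsymmU8 (test fixtures). A standalone sketch of the signed-to-unsigned conversion the parser performs before storing the shape in the descriptor; the helper name and the sample call are hypothetical, not armnn API:

    #include <cstdint>
    #include <vector>

    // Sketch: convert the raw int32 dimensions read out of the model buffer into the
    // unsigned values that TransposeConvolution2dDescriptor::m_OutputShape stores.
    std::vector<unsigned int> ToDescriptorShape(const std::vector<int32_t>& rawShape)
    {
        std::vector<unsigned int> outputShape;
        outputShape.reserve(rawShape.size());
        for (int32_t dimension : rawShape)
        {
            outputShape.push_back(static_cast<unsigned int>(dimension));
        }
        return outputShape;
    }

    // Usage: ToDescriptorShape({1, 8, 8, 16}) yields {1u, 8u, 8u, 16u}.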