diff options
Diffstat (limited to 'src')
-rw-r--r-- | src/armnn/layers/PadLayer.cpp | 31 | ||||
-rw-r--r-- | src/armnn/layers/PadLayer.hpp | 6 |
2 files changed, 36 insertions, 1 deletion
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp index 4fcbc77c7b..a8c749c570 100644 --- a/src/armnn/layers/PadLayer.cpp +++ b/src/armnn/layers/PadLayer.cpp @@ -36,10 +36,39 @@ PadLayer* PadLayer::Clone(Graph& graph) const return std::move(layer); } +std::vector<TensorShape> PadLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const +{ + ARMNN_ASSERT(inputShapes.size() == 1); + const TensorShape& inputShape = inputShapes[0]; + + unsigned int rank = inputShape.GetNumDimensions(); + ARMNN_ASSERT(m_Param.m_PadList.size() == rank); + ARMNN_ASSERT(rank != 0); + + std::vector<unsigned int> outputDimensionSizes; + outputDimensionSizes.reserve(rank); + for (unsigned int i = 0; i < rank; ++i) + { + outputDimensionSizes[i] = inputShape[i] + m_Param.m_PadList[i].first + m_Param.m_PadList[i].second; + } + + TensorShape tensorShape = TensorShape( rank, outputDimensionSizes.data()); + return std::vector<TensorShape>({ tensorShape }); +} + void PadLayer::ValidateTensorShapesFromInputs() { + VerifyLayerConnections(1, CHECK_LOCATION()); + + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + + ARMNN_ASSERT(inferredShapes.size() == 1); - return; + ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer"); } void PadLayer::Accept(ILayerVisitor& visitor) const diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp index 16cdbf57d4..5664997597 100644 --- a/src/armnn/layers/PadLayer.hpp +++ b/src/armnn/layers/PadLayer.hpp @@ -29,6 +29,12 @@ public: /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
void ValidateTensorShapesFromInputs() override; + /// By default returns inputShapes if the number of inputs are equal to number of outputs, + /// otherwise infers the output shapes from given input shapes and layer properties. + /// @param [in] inputShapes The input shapes layer has. + /// @return A vector to the inferred output shape. + std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape> &inputShapes) const override; + void Accept(ILayerVisitor& visitor) const override; protected: |