From 377fb21e956ea68ffd234be47481002a0e46ee46 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Tue, 10 Jan 2023 15:55:28 +0000
Subject: IVGCVSW-7244 ConcatLayer overlapping views on TFLiteParser

* Added ability to calculate dynamic tensors and propagate them through the
  model so that when those tensors are later used as inputs they have the
  right shapes.
* Added InferOutputShapes to DetectionPostProcessLayer.
* Added InferOutputShapes to MeanLayer.
* Added InferOutputShapes to RankLayer.
* Added InferOutputShapes to ReduceLayer.
* Fixed typos in TfLiteParser.

Signed-off-by: Mike Kelly
Change-Id: I880c0716938ef278f5dbf01a8a73a5cc99ce5ded
---
 src/armnn/layers/ChannelShuffleLayer.hpp       |  3 +--
 src/armnn/layers/DetectionPostProcessLayer.cpp | 34 ++++++++++++++++-------
 src/armnn/layers/DetectionPostProcessLayer.hpp |  8 +++++-
 src/armnn/layers/MeanLayer.cpp                 | 22 ++++++++++-----
 src/armnn/layers/MeanLayer.hpp                 |  8 +++++-
 src/armnn/layers/RankLayer.cpp                 |  7 ++++-
 src/armnn/layers/RankLayer.hpp                 | 37 ++++++++++++++++++--------
 src/armnn/layers/ReduceLayer.cpp               | 21 +++++++++++----
 src/armnn/layers/ReduceLayer.hpp               |  9 +++++--
 src/armnn/layers/ReshapeLayer.cpp              |  3 ++-
 10 files changed, 113 insertions(+), 39 deletions(-)

(limited to 'src/armnn')

diff --git a/src/armnn/layers/ChannelShuffleLayer.hpp b/src/armnn/layers/ChannelShuffleLayer.hpp
index 79ab426a44..45cd5e5d82 100644
--- a/src/armnn/layers/ChannelShuffleLayer.hpp
+++ b/src/armnn/layers/ChannelShuffleLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -26,7 +26,6 @@ public:
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
-    // TODO Do you need to create an InferOutputShapes function for ChannelShuffle?
 protected:
     ChannelShuffleLayer(const ChannelShuffleDescriptor& param, const char* name);
 
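The shape propagation described in the commit message amounts to visiting the graph in topological order and letting each layer publish its output shapes before any consumer asks for them; the InferOutputShapes overrides added below are how each touched layer answers that question. The following is a minimal standalone sketch of that propagation idea only: FakeLayer, Propagate and the lambdas are illustrative stand-ins, not Arm NN types or APIs.

    // Standalone sketch: propagate shapes through a topologically sorted list of
    // "layers", each of which derives its output shapes from its inputs' shapes.
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    using Shape = std::vector<uint32_t>;

    struct FakeLayer                     // illustrative stand-in, not armnn::Layer
    {
        std::vector<std::size_t> inputs; // indices of producer layers
        std::function<std::vector<Shape>(const std::vector<Shape>&)> inferOutputShapes;
        std::vector<Shape> outputShapes; // filled in during propagation
    };

    void Propagate(std::vector<FakeLayer>& layers)
    {
        for (auto& layer : layers)       // assumes topological order, one output per layer
        {
            std::vector<Shape> inputShapes;
            for (std::size_t producer : layer.inputs)
            {
                inputShapes.push_back(layers[producer].outputShapes.front());
            }
            layer.outputShapes = layer.inferOutputShapes(inputShapes);
        }
    }

    int main()
    {
        std::vector<FakeLayer> layers(2);
        // Layer 0: an input producing a fixed 1x8x8x3 tensor.
        layers[0].inferOutputShapes = [](const std::vector<Shape>&)
        {
            return std::vector<Shape>{ { 1, 8, 8, 3 } };
        };
        // Layer 1: a reshape-like layer that flattens whatever it is given.
        layers[1].inputs = { 0 };
        layers[1].inferOutputShapes = [](const std::vector<Shape>& in)
        {
            uint32_t elements = 1;
            for (uint32_t d : in.front()) { elements *= d; }
            return std::vector<Shape>{ { 1, elements } };
        };
        Propagate(layers);               // layers[1].outputShapes is now { { 1, 192 } }
    }
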
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 28c6d50659..33f894414a 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -49,30 +49,46 @@ void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
     ARMNN_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
 
-    unsigned int detectedBoxes = m_Param.m_MaxDetections * m_Param.m_MaxClassesPerDetection;
+    std::vector<TensorShape> inferredShapes = InferOutputShapes(
+            { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+              GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
 
-    const TensorShape& inferredDetectionBoxes = TensorShape({ 1, detectedBoxes, 4 });
-    const TensorShape& inferredDetectionScores = TensorShape({ 1, detectedBoxes });
-    const TensorShape& inferredNumberDetections = TensorShape({ 1 });
+    ARMNN_ASSERT(inferredShapes.size() == 4);
+    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+    ARMNN_ASSERT(inferredShapes[1].GetDimensionality() == Dimensionality::Specified);
+    ARMNN_ASSERT(inferredShapes[2].GetDimensionality() == Dimensionality::Specified);
+    ARMNN_ASSERT(inferredShapes[3].GetDimensionality() == Dimensionality::Specified);
 
-    ValidateAndCopyShape(outputShape, inferredDetectionBoxes, m_ShapeInferenceMethod, "DetectionPostProcessLayer");
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DetectionPostProcessLayer");
 
     ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
-                         inferredDetectionScores,
+                         inferredShapes[1],
                          m_ShapeInferenceMethod,
                          "DetectionPostProcessLayer", 1);
 
     ValidateAndCopyShape(GetOutputSlot(2).GetTensorInfo().GetShape(),
-                         inferredDetectionScores,
+                         inferredShapes[2],
                          m_ShapeInferenceMethod,
                          "DetectionPostProcessLayer", 2);
 
     ValidateAndCopyShape(GetOutputSlot(3).GetTensorInfo().GetShape(),
-                         inferredNumberDetections,
+                         inferredShapes[3],
                          m_ShapeInferenceMethod,
                          "DetectionPostProcessLayer", 3);
 }
 
+std::vector<TensorShape> DetectionPostProcessLayer::InferOutputShapes(const std::vector<TensorShape>&) const
+{
+    unsigned int detectedBoxes = m_Param.m_MaxDetections * m_Param.m_MaxClassesPerDetection;
+
+    std::vector<TensorShape> results;
+    results.push_back({ 1, detectedBoxes, 4 });
+    results.push_back({ 1, detectedBoxes });
+    results.push_back({ 1, detectedBoxes });
+    results.push_back({ 1 });
+    return results;
+}
+
 Layer::ConstantTensors DetectionPostProcessLayer::GetConstantTensorsByRef()
 {
     // For API stability DO NOT ALTER order and add new members to the end of vector
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp
index 07eb270f1f..e203032db0 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.hpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -34,6 +34,12 @@ public:
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    /// The model does not specify the output shapes. The output shapes are calculated from the
+    /// m_MaxDetections and m_MaxClassesPerDetection fields of the DetectionPostProcessDescriptor.
+    /// @param [in] inputShapes The input shapes the layer has. These are ignored for DetectionPostProcessLayer.
+    /// @return A vector of the inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
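The new DetectionPostProcessLayer::InferOutputShapes ignores the input shapes entirely: all four output shapes follow from m_MaxDetections and m_MaxClassesPerDetection. A standalone restatement of that arithmetic, with a stand-in Descriptor struct rather than the Arm NN descriptor type:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Descriptor                    // stand-in for the two fields the layer reads
    {
        uint32_t maxDetections;
        uint32_t maxClassesPerDetection;
    };

    std::vector<std::vector<uint32_t>> InferDetectionShapes(const Descriptor& d)
    {
        const uint32_t detectedBoxes = d.maxDetections * d.maxClassesPerDetection;
        return {
            { 1, detectedBoxes, 4 },     // output 0: detection boxes, [1, N, 4]
            { 1, detectedBoxes },        // outputs 1 and 2 (classes and scores)
            { 1, detectedBoxes },        //   share the shape [1, N]
            { 1 }                        // output 3: number of valid detections
        };
    }

    int main()
    {
        // e.g. a post-process step configured for 10 detections, 1 class each
        auto shapes = InferDetectionShapes({ 10, 1 });
        assert(shapes.size() == 4);
        assert((shapes[0] == std::vector<uint32_t>{ 1, 10, 4 }));
        assert((shapes[3] == std::vector<uint32_t>{ 1 }));
    }
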
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index 49eac04a8e..a6f721b076 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -49,7 +49,19 @@ void MeanLayer::ValidateTensorShapesFromInputs()
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
-    const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
+    std::vector<TensorShape> inferredShapes = InferOutputShapes(
+            { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MeanLayer");
+}
+
+std::vector<TensorShape> MeanLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    ARMNN_ASSERT(inputShapes.size() == 1);
+    const TensorShape& input = inputShapes[0];
 
     ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
                      "MeanLayer: Mean supports up to 4D input.");
@@ -88,7 +100,7 @@ void MeanLayer::ValidateTensorShapesFromInputs()
         {
             if (std::find(m_Param.m_Axis.begin(), m_Param.m_Axis.end(), i) == m_Param.m_Axis.end())
             {
-                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(input.GetShape()[i]);
+                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(input[i]);
                 ++outputIndex;
             }
             else if (m_Param.m_KeepDims)
@@ -98,9 +110,7 @@ void MeanLayer::ValidateTensorShapesFromInputs()
             }
         }
     }
-    const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data());
-
-    ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
+    return std::vector<TensorShape>({ TensorShape(outputRank, dimSizes.data()) });
 }
 
 void MeanLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
index 87998bfc08..3bde7c6fcc 100644
--- a/src/armnn/layers/MeanLayer.hpp
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -29,6 +29,12 @@ public:
     /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
     void ValidateTensorShapesFromInputs() override;
 
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector of the inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
     void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
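MeanLayer::InferOutputShapes, like the matching ReduceLayer change later in this patch, builds the output shape by keeping every non-reduced dimension, keeping reduced dimensions as size 1 when m_KeepDims is set, and dropping them otherwise. The sketch below restates only that explicit-axis path, using plain std::vector rather than armnn::TensorShape; the empty-axis (reduce-everything) branch handled elsewhere in the layer is deliberately left out.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> InferReducedShape(const std::vector<uint32_t>& input,
                                            const std::vector<uint32_t>& axis,
                                            bool keepDims)
    {
        std::vector<uint32_t> output;
        for (uint32_t i = 0; i < input.size(); ++i)
        {
            const bool reduced = std::find(axis.begin(), axis.end(), i) != axis.end();
            if (!reduced)
            {
                output.push_back(input[i]);  // dimension survives unchanged
            }
            else if (keepDims)
            {
                output.push_back(1);         // reduced dimension kept as size 1
            }
            // otherwise the reduced dimension is dropped entirely
        }
        if (output.empty())
        {
            output.push_back(1);             // never return an empty (rank 0) shape
        }
        return output;
    }

    int main()
    {
        assert((InferReducedShape({ 2, 3, 4 }, { 1 }, false) == std::vector<uint32_t>{ 2, 4 }));
        assert((InferReducedShape({ 2, 3, 4 }, { 1 }, true)  == std::vector<uint32_t>{ 2, 1, 4 }));
    }
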
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
index 0f9327b948..6ea271442f 100644
--- a/src/armnn/layers/RankLayer.cpp
+++ b/src/armnn/layers/RankLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,6 +31,11 @@ Layer* RankLayer::Clone(Graph& graph) const
     return clone;
 }
 
+std::vector<TensorShape> RankLayer::InferOutputShapes(const std::vector<TensorShape>&) const
+{
+    return std::vector<TensorShape>({ TensorShape(Dimensionality::Scalar) });
+}
+
 void RankLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
index 52d14c446e..a789917de0 100644
--- a/src/armnn/layers/RankLayer.hpp
+++ b/src/armnn/layers/RankLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -12,21 +12,36 @@ namespace armnn
 class RankLayer : public Layer
 {
-    public:
-        /// Makes a workload for the Rank type.
-        /// @param [in] factory The workload factory which will create the workload.
-        /// @return A pointer to the created workload, or nullptr if not created.
-        virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+public:
+    /// Makes a workload for the Rank type.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
 
-        Layer* Clone(Graph& graph) const override;
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    Layer* Clone(Graph& graph) const override;
 
-        void ValidateTensorShapesFromInputs() override;
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref RankLayer.
+    /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+    void ValidateTensorShapesFromInputs() override;
 
-        void ExecuteStrategy(IStrategy& strategy) const override;
+    /// Rank returns a scalar specifying the rank of the input tensor. The rank of a tensor is the number
+    /// of dimensions it has.
+    /// @param [in] inputShapes The input shapes the layer has. This is ignored for Rank.
+    /// @return A vector containing the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void ExecuteStrategy(IStrategy& strategy) const override;
 
 protected:
-    RankLayer(const char* name);
-    ~RankLayer() = default;
+    /// Constructor to create a RankLayer.
+    /// @param [in] name Optional name for the layer.
+    RankLayer(const char* name);
+
+    /// Default destructor
+    ~RankLayer() = default;
 };
 
 } //namespace armnn
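RankLayer::InferOutputShapes always reports a scalar output shape; the rank value itself is only produced at runtime. A minimal usage sketch of the two TensorShape calls the change relies on, assuming the public armnn/Tensor.hpp header is available to build against:

    #include <armnn/Tensor.hpp>
    #include <cassert>

    int main()
    {
        // The shape RankLayer now infers for its single output: a scalar.
        armnn::TensorShape scalarShape(armnn::Dimensionality::Scalar);
        assert(scalarShape.GetDimensionality() == armnn::Dimensionality::Scalar);

        // An ordinary fixed shape, by contrast, reports Dimensionality::Specified,
        // which is what the asserts added elsewhere in this patch check for.
        armnn::TensorShape nhwc({ 1, 16, 16, 3 });
        assert(nhwc.GetDimensionality() == armnn::Dimensionality::Specified);
    }
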
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index aa54bc8f0c..e411996ced 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Samsung Electronics Co Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -50,6 +50,19 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
     const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
 
+    ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
+                     "ReduceLayer: Reduce supports up to 4D input.");
+
+    std::vector<TensorShape> inferredShapes = InferOutputShapes( {input.GetShape() });
+
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReduceLayer");
+}
+
+std::vector<TensorShape> ReduceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    ARMNN_ASSERT(inputShapes.size() == 1);
+    const TensorShape& input = inputShapes[0];
+
     ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
                      "ReduceLayer: Reduce supports up to 4D input.");
 
@@ -87,7 +100,7 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
         {
             if (std::find(m_Param.m_vAxis.begin(), m_Param.m_vAxis.end(), i) == m_Param.m_vAxis.end())
             {
-                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(input.GetShape()[i]);
+                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(input[i]);
                 ++outputIndex;
             }
             else if (m_Param.m_KeepDims)
@@ -97,9 +110,7 @@ void ReduceLayer::ValidateTensorShapesFromInputs()
             }
         }
     }
-    const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data());
-
-    ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer");
+    return std::vector<TensorShape>({ TensorShape(outputRank, dimSizes.data()) });
 }
 
 void ReduceLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp
index e9ea5d8e3f..24f19f812f 100644
--- a/src/armnn/layers/ReduceLayer.hpp
+++ b/src/armnn/layers/ReduceLayer.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Samsung Electronics Co Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -23,13 +23,18 @@ public:
     /// @param [in] graph The graph into which this layer is being cloned.
     ReduceLayer* Clone(Graph& graph) const override;
 
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector of the inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ReduceLayer.
     void ValidateTensorShapesFromInputs() override;
 
     void ExecuteStrategy(IStrategy& strategy) const override;
 
-
 protected:
     /// Constructor to create a ReduceLayer.
     /// @param [in] param ReduceDescriptor to configure the reduction operation.
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index c5ec45f211..e68460bf32 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ReshapeLayer.hpp"
@@ -49,6 +49,7 @@ void ReshapeLayer::ValidateTensorShapesFromInputs()
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
     ARMNN_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
 }
--
cgit v1.2.1