diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2021-02-03 09:29:30 +0000 |
---|---|---|
committer | Sadik Armagan <sadik.armagan@arm.com> | 2021-02-03 09:29:47 +0000 |
commit | 0c3ea5b8ac5ad8ca516930a0491afb1d1074e45b (patch) | |
tree | 47ff1e9c1c70a3b134c1e9063dada66d70a7c963 /src/armnn | |
parent | 84f41eb74765bd93307f3c6b334354c486dc746d (diff) | |
download | armnn-0c3ea5b8ac5ad8ca516930a0491afb1d1074e45b.tar.gz |
backends/reference: Add ReduceSum operation support
This patch adds ReduceSum operation support for reference backend,
which computes the sum of elements across dimensions of a tensor.
Changelog v1:
- Fix file header descriptions.
Changelog v2:
- Fix line limit issue.
- Fix type conversion issue.
Changelog v3:
- Remove tabs.
- Modify newly added file headers.
Changelog v4:
- Symbol on header isn't allowed so drop it from newly added file headers.
Changelog v5:
- Remove tabs, fix the use of brackets and align lines correctly.
Changelog v6:
- Add serializer and deserializer support.
Changelog v7:
- Fix build error add missed code.
Changelog v8:
- Rename ReduceSumDescriptor to ReduceDescriptor
- Update m_KeepDims field data type to bool on ReduceDescriptor
- Add ReduceOperation field to ReduceDescriptor
- Rename ReduceSumLayer to ReduceLayer
- Update ReduceLayer to use ReduceDescriptor
- Update ReduceLayer::ValidateTensorShapesFromInputs() function
- Rename RefReduceSumWorkload to RefReduceWorkload
- Update workload to use ReduceDescriptor
- Update workload to use Decoders and Encoders
- Remove ReduceSum.hpp and ReduceSum.cpp
- Added Reduce.hpp and Reduce.cpp
- Move Mean.cpp (which is implementing REDUCE_MEAN) functionality to Reduce.cpp
- Update RefMeanWorkload to call Reduce function with ReduceOperation::Mean argument
- Remove Mean.hpp and Mean.cpp
- Update the Serializer/Deserializer ArmnnSchema.fbs for ReduceLayer, ReduceDescriptor, and ReduceOperation
- Update Serializer and Deserializer for serializing/parsing ReduceLayer
- Added TfLite parser Sum test for REDUCE_SUM operator
- Make corresponding changes on front-end and Ref backend to support REDUCE_SUM operator
Changelog v9:
- Fixed build errors.
Change-Id: I8c8e034f3df73f9565b3c18eff51ecca6c542195
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Diffstat (limited to 'src/armnn')
-rw-r--r-- | src/armnn/InternalTypes.hpp | 1 | ||||
-rw-r--r-- | src/armnn/LayerSupport.cpp | 11 | ||||
-rw-r--r-- | src/armnn/LayersFwd.hpp | 2 | ||||
-rw-r--r-- | src/armnn/Network.cpp | 6 | ||||
-rw-r--r-- | src/armnn/Network.hpp | 3 | ||||
-rw-r--r-- | src/armnn/layers/ReduceLayer.cpp | 100 | ||||
-rw-r--r-- | src/armnn/layers/ReduceLayer.hpp | 42 | ||||
-rw-r--r-- | src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp | 1 |
8 files changed, 166 insertions, 0 deletions
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index 6e47399871..6e6559137c 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -63,6 +63,7 @@ X(QuantizedLstm) \ X(Reshape) \ X(Rank) \ + X(Reduce) \ X(Resize) \ X(Slice) \ X(Softmax) \ diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index 197e1afe18..8812e0ea77 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -528,6 +528,7 @@ bool IsQuantizedLstmSupported(const BackendId& backend, cellStateOut, output, paramsInfo); } + bool IsPermuteSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, @@ -558,6 +559,16 @@ bool IsPreluSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output); } +bool IsReduceSupported(const BackendId& backend, + const TensorInfo& input, + const TensorInfo& output, + const ReduceDescriptor& descriptor, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsReduceSupported, input, output, descriptor); +} + bool IsReshapeSupported(const BackendId& backend, const TensorInfo& input, const TensorInfo& output, diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index b9ca61a70b..6782fb5eb7 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -56,6 +56,7 @@ #include "layers/QLstmLayer.hpp" #include "layers/QuantizedLstmLayer.hpp" #include "layers/RankLayer.hpp" +#include "layers/ReduceLayer.hpp" #include "layers/ReshapeLayer.hpp" #include "layers/ResizeLayer.hpp" #include "layers/SliceLayer.hpp" @@ -149,6 +150,7 @@ DECLARE_LAYER(Quantize) DECLARE_LAYER(QLstm) DECLARE_LAYER(QuantizedLstm) DECLARE_LAYER(Rank) +DECLARE_LAYER(Reduce) DECLARE_LAYER(Reshape) DECLARE_LAYER(Resize) DECLARE_LAYER(Slice) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index d41f2f6fa7..f8b0675f0d 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp 
@@ -1491,6 +1491,12 @@ IConnectableLayer* Network::AddRankLayer(const char* name) return m_Graph->AddLayer<RankLayer>(name); } +IConnectableLayer* Network::AddReduceLayer(const ReduceDescriptor& reduceDescriptor, + const char* name) +{ + return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name); +} + IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor, const char* name) { diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index c652edb416..1205bd847e 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -169,6 +169,9 @@ public: IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name = nullptr) override; + IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor, + const char* name = nullptr) override; + IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc, const char* name = nullptr) override; diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp new file mode 100644 index 0000000000..b68cd2eabc --- /dev/null +++ b/src/armnn/layers/ReduceLayer.cpp @@ -0,0 +1,100 @@ +// +// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "ReduceLayer.hpp" +#include "LayerCloneBase.hpp" + +#include <armnn/TypesUtils.hpp> + +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +namespace armnn +{ + +ReduceLayer::ReduceLayer(const ReduceDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Reduce, param, name) +{ +} + +std::unique_ptr<IWorkload> ReduceLayer::CreateWorkload(const IWorkloadFactory& factory) const +{ + ReduceQueueDescriptor descriptor; + return factory.CreateReduce(descriptor, PrepInfoAndDesc(descriptor)); +} + +ReduceLayer* ReduceLayer::Clone(Graph& graph) const +{ + return CloneBase<ReduceLayer>(graph, m_Param, GetName()); +} + +void ReduceLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape(); + + VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod); + + const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo(); + + ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4, + "ReduceLayer: Reduce supports up to 4D input."); + + unsigned int rank = input.GetNumDimensions(); + unsigned int outputRank = 0; + + // Calculate output dimension + if (m_Param.m_KeepDims) + { + outputRank = rank; + } + else if (m_Param.m_vAxis.empty()) + { + outputRank = 1; + } + else if (m_Param.m_vAxis.size() > input.GetNumDimensions()) + { + throw LayerValidationException("ReduceLayer: Dimensions to reduce can not be bigger than input dimensions"); + } + else + { + outputRank = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Param.m_vAxis.size()); + if (outputRank == 0) + { + outputRank = 1; + } + } + + std::vector<unsigned int> dimSizes(outputRank, 1); + if (!m_Param.m_vAxis.empty()) + { + // Skip the dimension that has been reduced unless keepDims is true. 
+ unsigned int outputIndex = 0; + for (unsigned int i = 0; i < input.GetNumDimensions(); ++i) + { + if (std::find(m_Param.m_vAxis.begin(), m_Param.m_vAxis.end(), i) == m_Param.m_vAxis.end()) + { + dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(input.GetShape()[i]); + ++outputIndex; + } + else if (m_Param.m_KeepDims) + { + dimSizes[outputIndex] = 1; + ++outputIndex; + } + } + } + const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data()); + + ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer"); +} + +void ReduceLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitReduceLayer(this, GetParameters(), GetName()); +} + +} // namespace armnn diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp new file mode 100644 index 0000000000..fd4f2073f1 --- /dev/null +++ b/src/armnn/layers/ReduceLayer.hpp @@ -0,0 +1,42 @@ +// +// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "LayerWithParameters.hpp" + +namespace armnn +{ + +/// This layer represents a reduction operation. +class ReduceLayer : public LayerWithParameters<ReduceDescriptor> +{ +public: + /// Makes a workload for the Reduce type. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + ReduceLayer* Clone(Graph& graph) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref ReduceLayer. 
+ void ValidateTensorShapesFromInputs() override; + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a ReduceLayer. + /// @param [in] param ReduceDescriptor to configure the reduction operation. + /// @param [in] name Optional name for the layer. + ReduceLayer(const ReduceDescriptor& param, const char* name); + + /// Default destructor + ~ReduceLayer() = default; +}; + +} // namespace armnn diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp index dc6d11440f..c911caa699 100644 --- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp +++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp @@ -60,6 +60,7 @@ DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Normalization) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Pad) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Permute) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Pooling2d) +DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Reduce) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Reshape) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Resize) DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Slice) |