aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn')
-rw-r--r--src/armnn/InternalTypes.hpp1
-rw-r--r--src/armnn/LayerSupport.cpp11
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp6
-rw-r--r--src/armnn/Network.hpp3
-rw-r--r--src/armnn/layers/ReduceLayer.cpp100
-rw-r--r--src/armnn/layers/ReduceLayer.hpp42
-rw-r--r--src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp1
8 files changed, 166 insertions, 0 deletions
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 6e47399871..6e6559137c 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -63,6 +63,7 @@
X(QuantizedLstm) \
X(Reshape) \
X(Rank) \
+ X(Reduce) \
X(Resize) \
X(Slice) \
X(Softmax) \
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 197e1afe18..8812e0ea77 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -528,6 +528,7 @@ bool IsQuantizedLstmSupported(const BackendId& backend,
cellStateOut, output, paramsInfo);
}
+
bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
@@ -558,6 +559,16 @@ bool IsPreluSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsPreluSupported, input, alpha, output);
}
+bool IsReduceSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ const ReduceDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsReduceSupported, input, output, descriptor);
+}
+
bool IsReshapeSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index b9ca61a70b..6782fb5eb7 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -56,6 +56,7 @@
#include "layers/QLstmLayer.hpp"
#include "layers/QuantizedLstmLayer.hpp"
#include "layers/RankLayer.hpp"
+#include "layers/ReduceLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeLayer.hpp"
#include "layers/SliceLayer.hpp"
@@ -149,6 +150,7 @@ DECLARE_LAYER(Quantize)
DECLARE_LAYER(QLstm)
DECLARE_LAYER(QuantizedLstm)
DECLARE_LAYER(Rank)
+DECLARE_LAYER(Reduce)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(Resize)
DECLARE_LAYER(Slice)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index d41f2f6fa7..f8b0675f0d 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1491,6 +1491,12 @@ IConnectableLayer* Network::AddRankLayer(const char* name)
return m_Graph->AddLayer<RankLayer>(name);
}
+IConnectableLayer* Network::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
+}
+
IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c652edb416..1205bd847e 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -169,6 +169,9 @@ public:
IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) override;
+ IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
new file mode 100644
index 0000000000..b68cd2eabc
--- /dev/null
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -0,0 +1,100 @@
+//
+// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ReduceLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+ReduceLayer::ReduceLayer(const ReduceDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::Reduce, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ReduceLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ ReduceQueueDescriptor descriptor;
+ return factory.CreateReduce(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ReduceLayer* ReduceLayer::Clone(Graph& graph) const
+{
+ return CloneBase<ReduceLayer>(graph, m_Param, GetName());
+}
+
+void ReduceLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+ ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
+ "ReduceLayer: Reduce supports up to 4D input.");
+
+ unsigned int rank = input.GetNumDimensions();
+ unsigned int outputRank = 0;
+
+ // Calculate output dimension
+ if (m_Param.m_KeepDims)
+ {
+ outputRank = rank;
+ }
+ else if (m_Param.m_vAxis.empty())
+ {
+ outputRank = 1;
+ }
+ else if (m_Param.m_vAxis.size() > input.GetNumDimensions())
+ {
+ throw LayerValidationException("ReduceLayer: Dimensions to reduce can not be bigger than input dimensions");
+ }
+ else
+ {
+ outputRank = input.GetNumDimensions() - armnn::numeric_cast<unsigned int>(m_Param.m_vAxis.size());
+ if (outputRank == 0)
+ {
+ outputRank = 1;
+ }
+ }
+
+ std::vector<unsigned int> dimSizes(outputRank, 1);
+ if (!m_Param.m_vAxis.empty())
+ {
+ // Skip the dimension that has been reduced unless keepDims is true.
+ unsigned int outputIndex = 0;
+ for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
+ {
+ if (std::find(m_Param.m_vAxis.begin(), m_Param.m_vAxis.end(), i) == m_Param.m_vAxis.end())
+ {
+ dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(input.GetShape()[i]);
+ ++outputIndex;
+ }
+ else if (m_Param.m_KeepDims)
+ {
+ dimSizes[outputIndex] = 1;
+ ++outputIndex;
+ }
+ }
+ }
+ const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data());
+
+ ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "ReduceLayer");
+}
+
+void ReduceLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitReduceLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/ReduceLayer.hpp b/src/armnn/layers/ReduceLayer.hpp
new file mode 100644
index 0000000000..fd4f2073f1
--- /dev/null
+++ b/src/armnn/layers/ReduceLayer.hpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a reduction operation.
+class ReduceLayer : public LayerWithParameters<ReduceDescriptor>
+{
+public:
+ /// Makes a workload for the Reduce type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ ReduceLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ReduceLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a ReduceLayer.
+ /// @param [in] param ReduceDescriptor to configure the reduction operation.
+ /// @param [in] name Optional name for the layer.
+ ReduceLayer(const ReduceDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~ReduceLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index dc6d11440f..c911caa699 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -60,6 +60,7 @@ DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Normalization)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Pad)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Permute)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Pooling2d)
+DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Reduce)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Reshape)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Resize)
DECLARE_TEST_NAME_AND_DESCRIPTOR_LAYER_VISITOR_CLASS(Slice)