author     Tamás Nyíri <tamas.nyiri@arm.com>        2021-10-26 14:47:57 +0100
committer  Tamas Nyiri <tamas.nyiri@arm.com>        2021-11-17 11:31:44 +0000
commit     7b885b3cce70154596b1994b013ea91527117c26 (patch)
tree       cdc2ee30a6dc03a4e26e6783a84ccd9be867242a /src/armnn
parent     888a363115e0bf47f227c9db6fc1dbfe0418f69c (diff)
download   armnn-7b885b3cce70154596b1994b013ea91527117c26.tar.gz
IVGCVSW-6509 Front End + Reference Workload implementation
Subtask of story: IVGCVSW-6164 Add a Pooling3d FrontEnd and Ref Implementation

* Add front end
* Add reference workload
* Add corresponding unit tests

Change-Id: Icce4146dd0a06a1da46a2def00a82d343e171750
Signed-off-by: Tamas Nyiri <tamas.nyiri@arm.com>
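For orientation, here is a minimal usage sketch (illustrative, not part of the patch): it wires the new INetwork::AddPooling3dLayer() entry point added below in Network.cpp/Network.hpp into a small network using the existing public input/output layer API. The descriptor values are arbitrary.

    // Minimal usage sketch (illustrative, not part of this commit).
    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    armnn::INetworkPtr BuildPooling3dNetwork()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        // A 2x2x2 max pool with stride 2 in NDHWC layout.
        armnn::Pooling3dDescriptor descriptor;
        descriptor.m_PoolType   = armnn::PoolingAlgorithm::Max;
        descriptor.m_PoolWidth  = 2;
        descriptor.m_PoolHeight = 2;
        descriptor.m_PoolDepth  = 2;
        descriptor.m_StrideX    = 2;
        descriptor.m_StrideY    = 2;
        descriptor.m_StrideZ    = 2;
        descriptor.m_DataLayout = armnn::DataLayout::NDHWC;

        armnn::IConnectableLayer* input   = network->AddInputLayer(0, "input");
        armnn::IConnectableLayer* pooling = network->AddPooling3dLayer(descriptor, "pooling3d");
        armnn::IConnectableLayer* output  = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
        pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // Tensor infos would normally be set on the output slots before optimisation,
        // e.g. input->GetOutputSlot(0).SetTensorInfo(...).
        return network;
    }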
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/BackendHelper.cpp          |   8
-rw-r--r--  src/armnn/LayersFwd.hpp              |   2
-rw-r--r--  src/armnn/Network.cpp                |  12
-rw-r--r--  src/armnn/Network.hpp                |   3
-rw-r--r--  src/armnn/layers/Pooling3dLayer.cpp  | 131
-rw-r--r--  src/armnn/layers/Pooling3dLayer.hpp  |  52
-rw-r--r--  src/armnn/test/InferOutputTests.cpp  |   3
-rw-r--r--  src/armnn/test/InferOutputTests.hpp  |  34
8 files changed, 245 insertions, 0 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index c3cebddb2b..f561b93c12 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -646,6 +646,14 @@ bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
}
+bool LayerSupportHandle::IsPooling3dSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const Pooling3dDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ return m_LayerSupport->IsPooling3dSupported(input, output, descriptor, reasonIfUnsupported.value());
+}
+
bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported)
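A minimal sketch of how the new check can be queried through the public helper; it assumes the existing armnn::GetILayerSupportByBackendId() helper and the "CpuRef" backend id, neither of which is part of this diff.

    // Sketch only: queries Pooling3d support on the reference backend.
    #include <armnn/BackendHelper.hpp>
    #include <armnn/Descriptors.hpp>
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <string>

    bool RefSupportsPooling3d(const armnn::TensorInfo& input,
                              const armnn::TensorInfo& output,
                              const armnn::Pooling3dDescriptor& descriptor)
    {
        // Look up the layer-support handle for the reference backend ("CpuRef" assumed).
        armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(armnn::BackendId("CpuRef"));

        std::string reason;
        const bool supported = handle.IsPooling3dSupported(input, output, descriptor,
                                                           armnn::Optional<std::string&>(reason));
        // On failure, 'reason' holds the backend's explanation.
        return supported;
    }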
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 49c39b3985..607c83b5fa 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -53,6 +53,7 @@
#include "layers/PadLayer.hpp"
#include "layers/PermuteLayer.hpp"
#include "layers/Pooling2dLayer.hpp"
+#include "layers/Pooling3dLayer.hpp"
#include "layers/PreCompiledLayer.hpp"
#include "layers/PreluLayer.hpp"
#include "layers/QuantizeLayer.hpp"
@@ -152,6 +153,7 @@ DECLARE_LAYER(Output)
DECLARE_LAYER(Pad)
DECLARE_LAYER(Permute)
DECLARE_LAYER(Pooling2d)
+DECLARE_LAYER(Pooling3d)
DECLARE_LAYER(PreCompiled)
DECLARE_LAYER(Prelu)
DECLARE_LAYER(Quantize)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 17a1da1f6c..d3a7f9788a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -208,6 +208,12 @@ IConnectableLayer* INetwork::AddPooling2dLayer(const Pooling2dDescriptor& poolin
return pNetworkImpl->AddPooling2dLayer(pooling2dDescriptor, name);
}
+IConnectableLayer* INetwork::AddPooling3dLayer(const Pooling3dDescriptor& pooling3dDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddPooling3dLayer(pooling3dDescriptor, name);
+}
+
IConnectableLayer* INetwork::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
const char* name)
{
@@ -2033,6 +2039,12 @@ IConnectableLayer* NetworkImpl::AddPooling2dLayer(const Pooling2dDescriptor& poo
return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
}
+IConnectableLayer* NetworkImpl::AddPooling3dLayer(const Pooling3dDescriptor& pooling3dDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<Pooling3dLayer>(pooling3dDescriptor, name);
+}
+
IConnectableLayer* NetworkImpl::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 818a765296..959d88dbed 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -167,6 +167,9 @@ public:
IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
const char* name = nullptr);
+ IConnectableLayer* AddPooling3dLayer(const Pooling3dDescriptor& pooling3dDescriptor,
+ const char* name = nullptr);
+
IConnectableLayer* AddPreluLayer(const char* name = nullptr);
IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);
diff --git a/src/armnn/layers/Pooling3dLayer.cpp b/src/armnn/layers/Pooling3dLayer.cpp
new file mode 100644
index 0000000000..884f8e0499
--- /dev/null
+++ b/src/armnn/layers/Pooling3dLayer.cpp
@@ -0,0 +1,131 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Pooling3dLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+Pooling3dLayer::Pooling3dLayer(const Pooling3dDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::Pooling3d, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> Pooling3dLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ Pooling3dQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreatePooling3d(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+Pooling3dLayer* Pooling3dLayer::Clone(Graph& graph) const
+{
+ return CloneBase<Pooling3dLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ ARMNN_ASSERT(inputShapes.size() == 1);
+ const TensorShape& inputShape = inputShapes[0];
+ const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
+
+ // If we support multiple batch dimensions in the future, then this assert will need to change.
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 5, "Pooling3dLayer will always have 5D input.");
+
+ unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
+ unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
+ unsigned int inDepth = inputShape[dimensionIndices.GetDepthIndex()];
+ unsigned int inChannels = inputShape[dimensionIndices.GetChannelsIndex()];
+ unsigned int inBatchSize = inputShape[0];
+
+ bool isGlobalPooling = (m_Param.m_StrideX==0 && m_Param.m_StrideY==0 && m_Param.m_StrideZ==0);
+ unsigned int outWidth = 1;
+ unsigned int outHeight = 1;
+ unsigned int outDepth = 1;
+ if (!isGlobalPooling)
+ {
+ ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0 && m_Param.m_StrideZ!=0,
+ "Stride can only be zero when performing global pooling");
+
+ auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
+ {
+ unsigned int readSize = inSize + lowPad + highPad - poolSize;
+ float div = static_cast<float>(readSize) / static_cast<float>(stride);
+
+ unsigned int size = 0;
+ switch (outputShapeRounding)
+ {
+ case OutputShapeRounding::Ceiling:
+ size = static_cast<unsigned int>(ceil(div)) + 1;
+ break;
+ case OutputShapeRounding::Floor:
+ size = static_cast<unsigned int>(floor(div)) + 1;
+ break;
+ default:
+ ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+ }
+
+ // Makes sure that border operations will start from inside the input and not the padded area.
+ // This is what CL does...
+ if ((size - 1)*stride >= inSize + lowPad)
+ {
+ --size;
+ }
+
+ return size;
+ };
+
+ outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX,
+ m_Param.m_OutputShapeRounding);
+ outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
+ m_Param.m_OutputShapeRounding);
+ outDepth = CalcSize(inDepth, m_Param.m_PadFront, m_Param.m_PadBack, m_Param.m_PoolDepth, m_Param.m_StrideZ,
+ m_Param.m_OutputShapeRounding);
+ }
+ unsigned int outChannels = inChannels;
+ unsigned int outBatchSize = inBatchSize;
+
+ TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NDHWC ?
+ TensorShape( { outBatchSize, outDepth, outHeight, outWidth, outChannels } ) :
+ TensorShape( { outBatchSize, outChannels, outDepth, outHeight, outWidth });
+
+ return std::vector<TensorShape>({ tensorShape });
+}
+
+void Pooling3dLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+ ARMNN_ASSERT(inferredShapes.size() == 1);
+
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling3dLayer");
+}
+
+ARMNN_NO_DEPRECATE_WARN_BEGIN
+void Pooling3dLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitPooling3dLayer(this, GetParameters(), GetName());
+}
+ARMNN_NO_DEPRECATE_WARN_END
+
+} // namespace armnn
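As a worked example of CalcSize() above, take the values used by the new Pooling3dInferOutputShapeTest further down (inSize = 4, lowPad = highPad = 1, poolSize = 2, stride = 2):

    readSize = 4 + 1 + 1 - 2 = 4
    div      = 4 / 2         = 2
    size     = 2 + 1         = 3   (Floor and Ceiling agree because div is exact)
    border check: (3 - 1) * 2 = 4 < inSize + lowPad = 5, so size is not reduced

Each spatial dimension therefore comes out as 3, giving the NDHWC output shape { 1, 3, 3, 3, 1 } that the unit test expects.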
diff --git a/src/armnn/layers/Pooling3dLayer.hpp b/src/armnn/layers/Pooling3dLayer.hpp
new file mode 100644
index 0000000000..0aa48535c0
--- /dev/null
+++ b/src/armnn/layers/Pooling3dLayer.hpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a pooling 3d operation.
+class Pooling3dLayer : public LayerWithParameters<Pooling3dDescriptor>
+{
+public:
+ /// Makes a workload for the Pooling3d type.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ Pooling3dLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref Pooling3dLayer.
+ /// Uses the layer's m_ShapeInferenceMethod to decide whether the inferred output shape overwrites or only validates the existing one.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector containing the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ void Accept(ILayerVisitor& visitor) const override;
+ ARMNN_NO_DEPRECATE_WARN_END
+
+
+protected:
+ /// Constructor to create a Pooling3dLayer.
+ /// @param [in] param Pooling3dDescriptor to configure the pooling3d operation.
+ /// @param [in] name Optional name for the layer.
+ Pooling3dLayer(const Pooling3dDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~Pooling3dLayer() = default;
+};
+
+} // namespace
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 5365b831cf..f8d8e89555 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -47,6 +47,9 @@ ARMNN_SIMPLE_TEST_CASE(DepthwiseConvolution2dInferOutputShape, DepthwiseConvolut
// TransposeConvolution2D
ARMNN_SIMPLE_TEST_CASE(TransposeConvolution2dInferOutputShape, TransposeConvolution2dInferOutputShapeTest)
+// Pooling3D
+ARMNN_SIMPLE_TEST_CASE(Pooling3dInferOutputShape, Pooling3dInferOutputShapeTest)
+
// QLstm
ARMNN_SIMPLE_TEST_CASE(QLstmInferOutputShape, QLstmInferOutputShapeTest)
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index e2c854551f..6435d87be3 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -565,6 +565,40 @@ void DepthwiseConvolution2dInferOutputShapeTest()
CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
}
+void Pooling3dInferOutputShapeTest()
+{
+ armnn::Graph graph;
+
+ armnn::Pooling3dDescriptor descriptor;
+ descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+ descriptor.m_PoolDepth = 2;
+ descriptor.m_PoolHeight = 2;
+ descriptor.m_PoolWidth = 2;
+ descriptor.m_PadTop = 1;
+ descriptor.m_PadBottom = 1;
+ descriptor.m_PadLeft = 1;
+ descriptor.m_PadRight = 1;
+ descriptor.m_PadFront = 1;
+ descriptor.m_PadBack = 1;
+ descriptor.m_StrideX = 2;
+ descriptor.m_StrideY = 2;
+ descriptor.m_StrideZ = 2;
+ descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
+
+ armnn::Pooling3dLayer* const pooling3dLayer =
+ graph.AddLayer<armnn::Pooling3dLayer>(descriptor, "pooling3d");
+
+ std::vector<armnn::TensorShape> shapes;
+ const std::vector<unsigned int> inputSize = {1, 4, 4, 4, 1};
+ armnn::TensorShape inputShape(5, inputSize.data());
+ shapes.push_back(inputShape);
+
+ const std::vector<unsigned int> expectedOutputSizes = {1, 3, 3, 3, 1};
+ armnn::TensorShape expectedOutputShape(5, expectedOutputSizes.data());
+
+ CHECK(expectedOutputShape == pooling3dLayer->InferOutputShapes(shapes).at(0));
+}
+
// QLstm
void QLstmInferOutputShapeImpl(const armnn::QLstmDescriptor descriptor,
const std::vector<armnn::TensorShape>& inputShapes,