Arm NN 22.05 — source listing of Pooling3dLayer.cpp. Go to the documentation of this file.
1 //
2 // Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Pooling3dLayer.hpp"
7 
8 #include "LayerCloneBase.hpp"
9 
10 #include <armnn/TypesUtils.hpp>
11 
13 
16 
17 using namespace armnnUtils;
18 
19 namespace armnn
20 {
21 
22 Pooling3dLayer::Pooling3dLayer(const Pooling3dDescriptor& param, const char* name)
23  : LayerWithParameters(1, 1, LayerType::Pooling3d, param, name)
24 {
25 }
26 
27 std::unique_ptr<IWorkload> Pooling3dLayer::CreateWorkload(const IWorkloadFactory& factory) const
28 {
29  Pooling3dQueueDescriptor descriptor;
30  SetAdditionalInfo(descriptor);
31 
32  return factory.CreateWorkload(LayerType::Pooling3d, descriptor, PrepInfoAndDesc(descriptor));
33 }
34 
36 {
37  return CloneBase<Pooling3dLayer>(graph, m_Param, GetName());
38 }
39 
40 std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
41 {
42  ARMNN_ASSERT(inputShapes.size() == 1);
43  const TensorShape& inputShape = inputShapes[0];
44  const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
45 
46  // If we support multiple batch dimensions in the future, then this assert will need to change.
47  ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 5, "Pooling3dLayer will always have 5D input.");
48 
49  unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
50  unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
51  unsigned int inDepth = inputShape[dimensionIndices.GetDepthIndex()];
52  unsigned int inChannels = inputShape[dimensionIndices.GetChannelsIndex()];
53  unsigned int inBatchSize = inputShape[0];
54 
55  bool isGlobalPooling = (m_Param.m_StrideX==0 && m_Param.m_StrideY==0 && m_Param.m_StrideZ==0);
56  unsigned int outWidth = 1;
57  unsigned int outHeight = 1;
58  unsigned int outDepth = 1;
59  if (!isGlobalPooling)
60  {
62  "Stride can only be zero when performing global pooling");
63 
64  auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
65  {
66  unsigned int readSize = inSize + lowPad + highPad - poolSize;
67  float div = static_cast<float>(readSize) / static_cast<float>(stride);
68 
69  unsigned int size = 0;
70  switch (outputShapeRounding)
71  {
73  size = static_cast<unsigned int>(ceil(div)) + 1;
74  break;
76  size = static_cast<unsigned int>(floor(div)) + 1;
77  break;
78  default:
79  ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
80  }
81 
82  // Makes sure that border operations will start from inside the input and not the padded area.
83  // This is what CL does...
84  if ((size - 1)*stride >= inSize + lowPad)
85  {
86  --size;
87  }
88 
89  return size;
90  };
91 
92  outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX,
94  outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
96  outDepth = CalcSize(inDepth, m_Param.m_PadFront, m_Param.m_PadBack, m_Param.m_PoolDepth, m_Param.m_StrideZ,
98  }
99  unsigned int outChannels = inChannels;
100  unsigned int outBatchSize = inBatchSize;
101 
103  TensorShape( { outBatchSize, outDepth, outHeight, outWidth, outChannels } ) :
104  TensorShape( { outBatchSize, outChannels, outDepth, outHeight, outWidth });
105 
106  return std::vector<TensorShape>({ tensorShape });
107 }
108 
110 {
112 
113  const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
114 
116 
117  auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
118 
119  ARMNN_ASSERT(inferredShapes.size() == 1);
120 
121  ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling3dLayer");
122 }
123 
125 void Pooling3dLayer::Accept(ILayerVisitor& visitor) const
126 {
127  visitor.VisitPooling3dLayer(this, GetParameters(), GetName());
128 }
130 
131 } // namespace armnn
Pooling3dDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
unsigned int GetWidthIndex() const
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PoolWidth
Pooling width value.
uint32_t m_PoolDepth
Pooling depth value.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of Pooling3dLayer.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Pooling3d type.
Pooling3dLayer(const Pooling3dDescriptor &param, const char *name)
Constructor to create a Pooling3dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
unsigned int GetDepthIndex() const
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:491
Copyright (c) 2021 ARM Limited and Contributors.
DataLayout m_DataLayout
The data layout to be used (NCDHW, NDHWC).
const Pooling3dDescriptor & GetParameters() const override
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:204
uint32_t m_PadFront
Padding front value in the depth dimension.
uint32_t m_PoolHeight
Pooling height value.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:422
unsigned int GetHeightIndex() const
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:378
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:322
uint32_t m_PadBack
Padding back value in the depth dimension.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
uint32_t m_PadBottom
Padding bottom value in the height dimension.
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
This layer represents a pooling 3d operation.
A Pooling3dDescriptor for the Pooling3dLayer.
uint32_t m_StrideZ
Stride value when proceeding through input for the depth dimension.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:274
ARMNN_NO_DEPRECATE_WARN_BEGIN void Accept(ILayerVisitor &visitor) const override
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:324
virtual const TensorInfo & GetTensorInfo() const =0
void Pooling3d(Decoder< float > &rInputDecoder, Encoder< float > &rOutputEncoder, const TensorInfo &inputInfo, const TensorInfo &outputInfo, const Pooling3dDescriptor &params)
Computes the Pooling3d operation.
Definition: Pooling3d.cpp:172
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:317
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
unsigned int GetChannelsIndex() const
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:421
Pooling3dLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:467