ArmNN 21.11
Convolution2dLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Convolution2dLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/TensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <string>

using namespace armnnUtils;

namespace armnn
{

Convolution2dLayer::Convolution2dLayer(const Convolution2dDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::Convolution2d, param, name)
{
}
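
For context, the standalone sketch below shows how a convolution layer of this kind is normally created indirectly, through the public INetwork API; the graph then instantiates a Convolution2dLayer with the same descriptor. The descriptor values, weight shape and the name "conv1" are illustrative assumptions, not taken from this file.

#include <armnn/ArmNN.hpp>

#include <vector>

int main()
{
    // Minimal sketch assuming the ArmNN 21.11 public API; shapes and values are illustrative.
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::Convolution2dDescriptor desc;
    desc.m_StrideX     = 1;
    desc.m_StrideY     = 1;
    desc.m_PadLeft     = 1;
    desc.m_PadRight    = 1;
    desc.m_PadTop      = 1;
    desc.m_PadBottom   = 1;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout  = armnn::DataLayout::NHWC;

    // 16 filters of 3x3 over 3 input channels, zero-initialised purely for the sketch.
    armnn::TensorInfo weightsInfo({ 16, 3, 3, 3 }, armnn::DataType::Float32);
    std::vector<float> weightsData(16 * 3 * 3 * 3, 0.0f);
    armnn::ConstTensor weights(weightsInfo, weightsData);

    armnn::IConnectableLayer* conv =
        network->AddConvolution2dLayer(desc, weights, armnn::EmptyOptional(), "conv1");
    (void)conv; // Would normally be connected to input/output layers here.
    return 0;
}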

void Convolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
{
    //using DescriptorType = Parameters;
    const std::vector<TensorShape>& inputShapes =
    {
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        m_Weight->GetTensorInfo().GetShape()
    };
    const TensorShape filterShape = inputShapes[1];
    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
    unsigned int filterWidth = filterShape[dataLayoutIndex.GetWidthIndex()];
    unsigned int filterHeight = filterShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int outChannels = filterShape[0];

    fn("OutputChannels", std::to_string(outChannels));
    fn("FilterWidth", std::to_string(filterWidth));
    fn("FilterHeight", std::to_string(filterHeight));
    LayerWithParameters<Convolution2dDescriptor>::SerializeLayerParameters(fn);
}
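
The ParameterStringifyFunction callback used above is a std::function taking a name/value pair. As a rough sketch of a consumer (it relies on ArmNN's internal Layer.hpp header, and CollectParameters is a hypothetical helper, not part of ArmNN):

#include <Layer.hpp>

#include <map>
#include <string>

// Hypothetical helper: gather the stringified parameters of any layer into a map.
std::map<std::string, std::string> CollectParameters(const armnn::Layer& layer)
{
    std::map<std::string, std::string> params;
    armnn::ParameterStringifyFunction fn =
        [&params](const std::string& name, const std::string& value)
        {
            params[name] = value; // e.g. "OutputChannels", "FilterWidth", "FilterHeight", ...
        };
    layer.SerializeLayerParameters(fn);
    return params;
}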

std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    // At this level the constant data should not have been released.
    ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Convolution2dLayer_CreateWorkload");
    Convolution2dQueueDescriptor descriptor;

    descriptor.m_Weight = m_Weight.get();

    if (m_Param.m_BiasEnabled)
    {
        ARMNN_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
        descriptor.m_Bias = m_Bias.get();
    }

    SetAdditionalInfo(descriptor);

    return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
}

Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<Convolution2dLayer>(graph, m_Param, GetName());

    // The clone shares the same weight/bias handles as the original layer.
    layer->m_Weight = m_Weight ? m_Weight : nullptr;

    if (layer->m_Param.m_BiasEnabled)
    {
        layer->m_Bias = m_Bias ? m_Bias : nullptr;
    }

    return std::move(layer);
}

std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 2);
    const TensorShape& inputShape = inputShapes[0];
    const TensorShape filterShape = inputShapes[1];

    // If we support multiple batch dimensions in the future, then this assert will need to change.
    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");

    ARMNN_ASSERT(m_Param.m_StrideX > 0);
    ARMNN_ASSERT(m_Param.m_StrideY > 0);

    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);

    unsigned int inWidth = inputShape[dataLayoutIndex.GetWidthIndex()];
    unsigned int inHeight = inputShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int inBatchSize = inputShape[0];

    // Output extent = 1 + (padded input extent - dilated filter extent) / stride.
    unsigned int filterWidth = filterShape[dataLayoutIndex.GetWidthIndex()];
    unsigned int dilatedFilterWidth = filterWidth + (m_Param.m_DilationX - 1) * (filterWidth - 1);
    unsigned int readWidth = (inWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - dilatedFilterWidth;
    unsigned int outWidth = 1 + (readWidth / m_Param.m_StrideX);

    unsigned int filterHeight = filterShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int dilatedFilterHeight = filterHeight + (m_Param.m_DilationY - 1) * (filterHeight - 1);
    unsigned int readHeight = (inHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - dilatedFilterHeight;
    unsigned int outHeight = 1 + (readHeight / m_Param.m_StrideY);

    unsigned int outChannels = filterShape[0];
    unsigned int outBatchSize = inBatchSize;

    TensorShape tensorShape = m_Param.m_DataLayout == DataLayout::NHWC ?
        TensorShape( { outBatchSize, outHeight, outWidth, outChannels } ) :
        TensorShape( { outBatchSize, outChannels, outHeight, outWidth });

    return std::vector<TensorShape>({ tensorShape });
}
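
To make the arithmetic above concrete: each output extent is computed as 1 + (padded input extent - dilated filter extent) / stride, where the dilated filter extent is filter + (dilation - 1) * (filter - 1). A small standalone check with arbitrary sample numbers:

#include <cassert>

int main()
{
    // Sample values only; they do not come from this file.
    unsigned int inWidth = 224, padLeft = 1, padRight = 1;
    unsigned int filterWidth = 3, dilationX = 2, strideX = 2;

    unsigned int dilatedFilterWidth = filterWidth + (dilationX - 1) * (filterWidth - 1); // 3 + 1*2 = 5
    unsigned int readWidth = (inWidth + padLeft + padRight) - dilatedFilterWidth;        // 226 - 5 = 221
    unsigned int outWidth  = 1 + (readWidth / strideX);                                  // 1 + 110 = 111

    assert(outWidth == 111);
    return 0;
}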

void Convolution2dLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    // Check that the m_Weight data is not nullptr.
    ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");

    auto inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        m_Weight->GetTensorInfo().GetShape() });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution2dLayer");
}

Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
{
    return {m_Weight, m_Bias};
}

ARMNN_NO_DEPRECATE_WARN_BEGIN
void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
{
    ManagedConstTensorHandle managedWeight(m_Weight);
    ConstTensor weightsTensor(managedWeight.GetTensorInfo(), managedWeight.Map());

    Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
    ManagedConstTensorHandle managedBias(m_Bias);
    if (GetParameters().m_BiasEnabled)
    {
        ConstTensor biasTensor(managedBias.GetTensorInfo(), managedBias.Map());
        optionalBiasTensor = Optional<ConstTensor>(biasTensor);
    }

    visitor.VisitConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
ARMNN_NO_DEPRECATE_WARN_END

void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
    // Pass the weights (and bias, when enabled) to the strategy as constant tensors.
    ManagedConstTensorHandle managedWeight(m_Weight);
    std::vector<armnn::ConstTensor> constTensors { { managedWeight.GetTensorInfo(), managedWeight.Map() } };

    ManagedConstTensorHandle managedBias(m_Bias);
    if (GetParameters().m_BiasEnabled)
    {
        constTensors.emplace_back(ConstTensor(managedBias.GetTensorInfo(), managedBias.Map()));
    }

    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
}

} // namespace armnn
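
For reference, a rough sketch of a custom IStrategy that the ExecuteStrategy override above calls back into; it simply prints each visited layer and how many constant tensors (weights, and bias when enabled) were handed over. The class name and behaviour are illustrative, and the armnn/IStrategy.hpp include path is an assumption.

#include <armnn/ArmNN.hpp>
#include <armnn/IStrategy.hpp>

#include <iostream>
#include <vector>

// Hypothetical strategy: logs layer names and their constant-tensor counts.
class PrintingStrategy : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& descriptor,
                         const std::vector<armnn::ConstTensor>& constants,
                         const char* name,
                         const armnn::LayerBindingId id) override
    {
        (void)descriptor;
        (void)id;
        std::cout << (name ? name : layer->GetName()) << ": "
                  << constants.size() << " constant tensor(s)\n";
    }
};

Applied over a network (for example via INetwork::ExecuteStrategy), Convolution2dLayer::ExecuteStrategy forwards its weights, and its bias when m_BiasEnabled is set, through the constants argument.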