ArmNN 22.05
DepthwiseConvolution2dLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DepthwiseConvolution2dLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <string>

using namespace armnnUtils;

namespace armnn
{

DepthwiseConvolution2dLayer::DepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& param,
                                                         const char* name)
    : LayerWithParameters(param.GetNumInputs(), 1, LayerType::DepthwiseConvolution2d, param, name)
{
}

void DepthwiseConvolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
{
    const std::vector<TensorShape>& inputShapes =
    {
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
    };
    const TensorShape filterShape = inputShapes[1];
    unsigned int inputChannels   = filterShape[1];
    unsigned int filterWidth     = filterShape[3];
    unsigned int filterHeight    = filterShape[2];
    unsigned int depthMultiplier = filterShape[0];

    fn("FilterWidth", std::to_string(filterWidth));
    fn("FilterHeight", std::to_string(filterHeight));
    fn("DepthMultiplier", std::to_string(depthMultiplier));
    fn("InputChannels", std::to_string(inputChannels));

    LayerWithParameters<DepthwiseConvolution2dDescriptor>::SerializeLayerParameters(fn);
}

std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    DepthwiseConvolution2dQueueDescriptor descriptor;

    if (m_Weight)
    {
        descriptor.m_Weight = m_Weight.get();
    }
    if (m_Param.m_BiasEnabled && m_Bias)
    {
        descriptor.m_Bias = m_Bias.get();
    }

    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::DepthwiseConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
}

DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<DepthwiseConvolution2dLayer>(graph, m_Param, GetName());
    layer->m_Weight = m_Weight ? m_Weight : nullptr;

    if (layer->m_Param.m_BiasEnabled)
    {
        layer->m_Bias = m_Bias ? m_Bias : nullptr;
    }

    return std::move(layer);
}

std::vector<TensorShape>
DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 2);
    const TensorShape& inputShape  = inputShapes[0];
    const TensorShape& filterShape = inputShapes[1];

    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");

    ARMNN_ASSERT(m_Param.m_StrideX > 0);
    ARMNN_ASSERT(m_Param.m_StrideY > 0);

    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);

    unsigned int inputBatchSize = inputShape[0];
    unsigned int inputHeight    = inputShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int inputWidth     = inputShape[dataLayoutIndex.GetWidthIndex()];

    // Expected filter shape: [ 1, H, W, O ] - This shape does NOT depend on the data layout
    // Namely: [ 1, filter height, filter width, output channels ]

    unsigned int filterHeight        = filterShape[1];
    unsigned int dilatedFilterHeight = filterHeight + (m_Param.m_DilationY - 1) * (filterHeight - 1);
    unsigned int readHeight          = (inputHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - dilatedFilterHeight;
    unsigned int outputHeight        = 1 + (readHeight / m_Param.m_StrideY);

    unsigned int filterWidth        = filterShape[2];
    unsigned int dilatedFilterWidth = filterWidth + (m_Param.m_DilationX - 1) * (filterWidth - 1);
    unsigned int readWidth          = (inputWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - dilatedFilterWidth;
    unsigned int outputWidth        = 1 + (readWidth / m_Param.m_StrideX);

    unsigned int outputChannels  = filterShape[3];
    unsigned int outputBatchSize = inputBatchSize;

    TensorShape tensorShape = m_Param.m_DataLayout == DataLayout::NHWC ?
        TensorShape{ outputBatchSize, outputHeight, outputWidth, outputChannels } :
        TensorShape{ outputBatchSize, outputChannels, outputHeight, outputWidth };

    return std::vector<TensorShape>{ tensorShape };
}
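
// Worked example (illustrative addition, not part of the upstream file): with an
// NHWC input of shape { 1, 224, 224, 32 }, a filter of shape { 1, 3, 3, 32 },
// stride 2, dilation 1 and padding 1 on every side, the arithmetic above gives
//     dilatedFilterHeight = 3 + (1 - 1) * (3 - 1) = 3
//     readHeight          = (224 + 1 + 1) - 3     = 223
//     outputHeight        = 1 + (223 / 2)         = 112
// and the same for the width, so the inferred output shape is { 1, 112, 112, 32 }.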

void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(m_Param.GetNumInputs(), CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
                     "DepthwiseConvolution2dLayer: Weights data should not be null.");

    auto inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
    });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthwiseConvolution2dLayer");
}

Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
{
    // For API stability DO NOT ALTER order and add new members to the end of vector
    return {m_Weight, m_Bias};
}

ARMNN_NO_DEPRECATE_WARN_BEGIN
void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
    visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), GetName());
}
ARMNN_NO_DEPRECATE_WARN_END

void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}

} // namespace armnn
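
Usage note (editor's addition, not part of the upstream file): the layer above receives its weights on input slot 1 rather than only as an embedded member, so a graph built through the public API connects a constant layer to that slot. The sketch below is a minimal, illustrative example assuming the ArmNN 22.05 public headers; the shapes, values and layer names are invented for the example, and the output shape follows the arithmetic shown in InferOutputShapes.

// Illustrative sketch only; assumes the ArmNN 22.05 public API.
#include <armnn/ArmNN.hpp>

#include <vector>

armnn::INetworkPtr BuildDepthwiseExample()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Descriptor values chosen to match the worked example above.
    DepthwiseConvolution2dDescriptor desc;
    desc.m_StrideX     = 2;
    desc.m_StrideY     = 2;
    desc.m_PadLeft     = 1;
    desc.m_PadRight    = 1;
    desc.m_PadTop      = 1;
    desc.m_PadBottom   = 1;
    desc.m_DilationX   = 1;
    desc.m_DilationY   = 1;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout  = DataLayout::NHWC;

    // Activations: N=1, H=224, W=224, C=32 (NHWC).
    TensorInfo inputInfo(TensorShape({ 1, 224, 224, 32 }), DataType::Float32);

    // Weights in the [ 1, H, W, O ] layout described in InferOutputShapes above.
    TensorInfo weightsInfo(TensorShape({ 1, 3, 3, 32 }), DataType::Float32);
    weightsInfo.SetConstant();
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 0.5f); // made-up values
    ConstTensor weights(weightsInfo, weightsData);

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* wLayer = network->AddConstantLayer(weights, "weights");
    IConnectableLayer* dwConv = network->AddDepthwiseConvolution2dLayer(desc, "dwConv");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Slot 0 carries the activations, slot 1 the weights (see GetInputSlot(1) above).
    input->GetOutputSlot(0).Connect(dwConv->GetInputSlot(0));
    wLayer->GetOutputSlot(0).Connect(dwConv->GetInputSlot(1));
    dwConv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    wLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    // Matches the shape inferred by InferOutputShapes for these parameters.
    dwConv->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 112, 112, 32 }), DataType::Float32));

    return network;
}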