ArmNN 22.02
DepthwiseConvolution2dLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "DepthwiseConvolution2dLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <string>

using namespace armnnUtils;

namespace armnn
{

DepthwiseConvolution2dLayer::DepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& param,
                                                         const char* name)
    : LayerWithParameters(1, 1, LayerType::DepthwiseConvolution2d, param, name)
{
}

void DepthwiseConvolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
{
    const std::vector<TensorShape>& inputShapes =
    {
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        m_Weight->GetTensorInfo().GetShape()
    };
    const TensorShape filterShape = inputShapes[1];
    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
    unsigned int inputChannels = filterShape[1];
    unsigned int filterWidth = filterShape[3];
    unsigned int filterHeight = filterShape[2];
    unsigned int depthMultiplier = filterShape[0];

    fn("FilterWidth", std::to_string(filterWidth));
    fn("FilterHeight", std::to_string(filterHeight));
    fn("DepthMultiplier", std::to_string(depthMultiplier));
    fn("InputChannels", std::to_string(inputChannels));

    LayerWithParameters<DepthwiseConvolution2dDescriptor>::SerializeLayerParameters(fn);
}

std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    // on this level constant data should not be released..
    ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");

    DepthwiseConvolution2dQueueDescriptor descriptor;

    descriptor.m_Weight = m_Weight.get();

    if (m_Param.m_BiasEnabled)
    {
        ARMNN_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
        descriptor.m_Bias = m_Bias.get();
    }

    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::DepthwiseConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
}

DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<DepthwiseConvolution2dLayer>(graph, m_Param, GetName());
    layer->m_Weight = m_Weight ? m_Weight : nullptr;

    if (layer->m_Param.m_BiasEnabled)
    {
        layer->m_Bias = m_Bias ? m_Bias : nullptr;
    }

    return std::move(layer);
}

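// Infers the 4D output tensor shape from the input and filter shapes, applying the
// padding, stride and dilation values held in m_Param.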
std::vector<TensorShape>
DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 2);
    const TensorShape& inputShape  = inputShapes[0];
    const TensorShape& filterShape = inputShapes[1];

    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");

    ARMNN_ASSERT(m_Param.m_StrideX > 0);
    ARMNN_ASSERT(m_Param.m_StrideY > 0);

    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);

    unsigned int inputBatchSize = inputShape[0];
    unsigned int inputHeight    = inputShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int inputWidth     = inputShape[dataLayoutIndex.GetWidthIndex()];

    // Expected filter shape: [ 1, H, W, O ] - This shape does NOT depend on the data layout
    // Namely: [ 1, filter height, filter width, output channels ]

    unsigned int filterHeight = filterShape[1];
    unsigned int dilatedFilterHeight = filterHeight + (m_Param.m_DilationY - 1) * (filterHeight - 1);
    unsigned int readHeight = (inputHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - dilatedFilterHeight;
    unsigned int outputHeight = 1 + (readHeight / m_Param.m_StrideY);

    unsigned int filterWidth = filterShape[2];
    unsigned int dilatedFilterWidth = filterWidth + (m_Param.m_DilationX - 1) * (filterWidth - 1);
    unsigned int readWidth = (inputWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - dilatedFilterWidth;
    unsigned int outputWidth = 1 + (readWidth / m_Param.m_StrideX);
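    // For example, with inputWidth = 32, filterWidth = 3, m_DilationX = 2,
    // m_PadLeft = m_PadRight = 1 and m_StrideX = 2:
    //   dilatedFilterWidth = 3 + (2 - 1) * (3 - 1) = 5
    //   readWidth          = (32 + 1 + 1) - 5      = 29
    //   outputWidth        = 1 + (29 / 2)          = 15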

    unsigned int outputChannels  = filterShape[3];
    unsigned int outputBatchSize = inputBatchSize;

    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
        TensorShape{ outputBatchSize, outputHeight, outputWidth, outputChannels } :
        TensorShape{ outputBatchSize, outputChannels, outputHeight, outputWidth };

    return std::vector<TensorShape>{ tensorShape };
}

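// Checks that the output shape inferred from the connected input and the weights agrees
// with the shape already set on the output slot, copying it where the configured
// ShapeInferenceMethod allows.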
void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    // on this level constant data should not be released..
    ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");

    auto inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        m_Weight->GetTensorInfo().GetShape()
    });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthwiseConvolution2dLayer");
}

Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
{
    // For API stability DO NOT ALTER order and add new members to the end of vector
    return {m_Weight, m_Bias};
}

ARMNN_NO_DEPRECATE_WARN_BEGIN
void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
    ManagedConstTensorHandle managedWeight(m_Weight);
    ConstTensor weightsTensor(managedWeight.GetTensorInfo(), managedWeight.Map());
    Optional<ConstTensor> optionalBiasTensor = EmptyOptional();

    ManagedConstTensorHandle managedBias(m_Bias);
    if (GetParameters().m_BiasEnabled)
    {
        ConstTensor biasTensor(managedBias.GetTensorInfo(), managedBias.Map());
        optionalBiasTensor = Optional<ConstTensor>(biasTensor);
    }

    visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
ARMNN_NO_DEPRECATE_WARN_END

void DepthwiseConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
    ManagedConstTensorHandle managedWeight(m_Weight);
    std::vector<armnn::ConstTensor> constTensors { { managedWeight.GetTensorInfo(), managedWeight.Map() } };

    ManagedConstTensorHandle managedBias(m_Bias);
    if (GetParameters().m_BiasEnabled)
    {
        constTensors.emplace_back(ConstTensor(managedBias.GetTensorInfo(), managedBias.Map(true)));
    }

    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
}

} // namespace armnn
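
For context, here is a minimal sketch of how a graph might reach this layer through the public INetwork API of this release, where constant weights are still supplied to AddDepthwiseConvolution2dLayer as a ConstTensor rather than as a separate input layer. The shapes, values, the BuildDepthwiseExample name and the SetConstant() call are illustrative assumptions, not taken from this file.

// Minimal usage sketch (not part of DepthwiseConvolution2dLayer.cpp); all shapes and
// values below are illustrative assumptions.
#include <armnn/ArmNN.hpp>

#include <vector>

armnn::INetworkPtr BuildDepthwiseExample()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_StrideX     = 1;
    desc.m_StrideY     = 1;
    desc.m_BiasEnabled = false;
    desc.m_DataLayout  = armnn::DataLayout::NHWC;

    // Weights laid out as [ 1, H, W, O ], matching the comment in InferOutputShapes above.
    armnn::TensorInfo weightsInfo({ 1, 3, 3, 16 }, armnn::DataType::Float32);
    weightsInfo.SetConstant(); // weights passed as ConstTensor are expected to be flagged constant
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 0.0f);
    armnn::ConstTensor weights(weightsInfo, weightsData);

    armnn::IConnectableLayer* input  = network->AddInputLayer(0);
    armnn::IConnectableLayer* dwConv =
        network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::EmptyOptional(), "dwConv");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    // NHWC input of 1x8x8x16; with a 3x3 filter, no padding and stride 1, the layer's
    // InferOutputShapes yields 1x6x6x16.
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 8, 8, 16 }, armnn::DataType::Float32));
    input->GetOutputSlot(0).Connect(dwConv->GetInputSlot(0));
    dwConv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return network;
}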