ArmNN 22.05.01 - TransposeConvolution2dLayer.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "TransposeConvolution2dLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

using namespace armnnUtils;

namespace armnn
{
TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& param,
                                                         const char* name)
    : LayerWithParameters(1, 1, LayerType::TransposeConvolution2d, param, name)
{
}

std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");

    TransposeConvolution2dQueueDescriptor descriptor;
    descriptor.m_Weight = m_Weight.get();

    if (m_Param.m_BiasEnabled)
    {
        ARMNN_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
        descriptor.m_Bias = m_Bias.get();
    }

    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::TransposeConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
}

TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<TransposeConvolution2dLayer>(graph, m_Param, GetName());

    layer->m_Weight = m_Weight ? m_Weight : nullptr;

    if (layer->m_Param.m_BiasEnabled)
    {
        layer->m_Bias = m_Bias ? m_Bias : nullptr;
    }

    return std::move(layer);
}

std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
    const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 2);
    const TensorShape& inputShape  = inputShapes[0];
    const TensorShape& kernelShape = inputShapes[1];

    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");

    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);

    const unsigned int batches = inputShape[0];

    const unsigned int wInput = inputShape[dataLayoutIndex.GetWidthIndex()];
    const unsigned int hInput = inputShape[dataLayoutIndex.GetHeightIndex()];

    const unsigned int wKernel = kernelShape[dataLayoutIndex.GetWidthIndex()];
    const unsigned int hKernel = kernelShape[dataLayoutIndex.GetHeightIndex()];

    unsigned int wPadding = m_Param.m_PadLeft + m_Param.m_PadRight;
    unsigned int hPadding = m_Param.m_PadTop + m_Param.m_PadBottom;

    unsigned int wOutput = (wInput - 1) * m_Param.m_StrideX + wKernel - wPadding;
    unsigned int hOutput = (hInput - 1) * m_Param.m_StrideY + hKernel - hPadding;
    unsigned int cOutput = kernelShape[0];

    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
        TensorShape( { batches, hOutput, wOutput, cOutput } ) :
        TensorShape( { batches, cOutput, hOutput, wOutput });

    return std::vector<TensorShape>({ tensorShape });
}
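
// Worked example of the shape inference above (values chosen for illustration only, not taken
// from this file): for an NHWC input of shape { 1, 8, 8, 16 }, a kernel of shape { 32, 3, 3, 16 },
// strides of 2 in both dimensions and 1 pixel of padding on every edge:
//     wOutput = (8 - 1) * 2 + 3 - (1 + 1) = 15
//     hOutput = (8 - 1) * 2 + 3 - (1 + 1) = 15
//     cOutput = kernelShape[0]            = 32
// giving an inferred output shape of { 1, 15, 15, 32 }.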

void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");

    std::vector<TensorShape> expectedOutputShape;
    // If output_shape was specified then use it rather than calculate an inferred output shape.
    if (m_Param.m_OutputShapeEnabled)
    {
        TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
                                       m_Param.m_OutputShape.data());
        expectedOutputShape.push_back(shapeAsTensorShape);
    }
    else
    {
        expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
                                                 m_Weight->GetTensorInfo().GetShape() });
    }

    ARMNN_ASSERT(expectedOutputShape.size() == 1);

    ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
}

Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
{
    // For API stability DO NOT ALTER order and add new members to the end of vector
    return {m_Weight, m_Bias};
}

ARMNN_NO_DEPRECATE_WARN_BEGIN
void TransposeConvolution2dLayer::Accept(ILayerVisitor& visitor) const
{
    ManagedConstTensorHandle managedWeight(m_Weight);
    ConstTensor weightsTensor(managedWeight.GetTensorInfo(), managedWeight.Map());

    Optional<ConstTensor> optionalBiasTensor = EmptyOptional();
    ManagedConstTensorHandle managedBias(m_Bias);
    if (GetParameters().m_BiasEnabled)
    {
        ConstTensor biasTensor(managedBias.GetTensorInfo(), managedBias.Map());
        optionalBiasTensor = Optional<ConstTensor>(biasTensor);
    }

    visitor.VisitTransposeConvolution2dLayer(this, GetParameters(), weightsTensor, optionalBiasTensor, GetName());
}
ARMNN_NO_DEPRECATE_WARN_END

void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
    ManagedConstTensorHandle managedWeight(m_Weight);
    std::vector<armnn::ConstTensor> constTensors { { managedWeight.GetTensorInfo(), managedWeight.Map() } };

    ManagedConstTensorHandle managedBias(m_Bias);
    if (GetParameters().m_BiasEnabled)
    {
        constTensors.emplace_back(ConstTensor(managedBias.GetTensorInfo(), managedBias.Map()));
    }

    strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
}

} // namespace armnn
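
For context, the sketch below shows how a TransposeConvolution2d layer is normally created through the public INetwork API rather than by constructing this layer class directly. It is a minimal, illustrative example, not part of this file: the helper name BuildTransposeConv2dNetwork, the descriptor values, and the tensor shapes are assumptions chosen to match the worked shape-inference comment above, and it relies on the INetwork::AddTransposeConvolution2dLayer overload that takes a descriptor, a ConstTensor of weights and an Optional<ConstTensor> bias.

#include <armnn/INetwork.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

#include <vector>

// Hypothetical helper: builds a network containing a single stride-2 transpose convolution.
armnn::INetworkPtr BuildTransposeConv2dNetwork()
{
    // Describe the operation: stride 2 in both dimensions, 1 pixel of padding on every edge, NHWC layout.
    armnn::TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_PadLeft     = 1;
    descriptor.m_PadRight    = 1;
    descriptor.m_PadTop      = 1;
    descriptor.m_PadBottom   = 1;
    descriptor.m_BiasEnabled = false;
    descriptor.m_DataLayout  = armnn::DataLayout::NHWC;

    // Illustrative constant weights (all zeros): 32 output channels, 3x3 kernel, 16 input channels.
    armnn::TensorInfo weightsInfo({ 32, 3, 3, 16 }, armnn::DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 0.0f);
    armnn::ConstTensor weights(weightsInfo, weightsData);

    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* input  = network->AddInputLayer(0);
    armnn::IConnectableLayer* deconv = network->AddTransposeConvolution2dLayer(descriptor,
                                                                               weights,
                                                                               armnn::EmptyOptional(),
                                                                               "deconv");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(deconv->GetInputSlot(0));
    deconv->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // With a { 1, 8, 8, 16 } NHWC input, the InferOutputShapes logic above yields { 1, 15, 15, 32 }.
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 8, 8, 16 }, armnn::DataType::Float32));
    deconv->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 15, 15, 32 }, armnn::DataType::Float32));

    return network;
}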