ArmNN 24.02
TransposeConv2dOperator.cpp File Reference
Include dependency graph for TransposeConv2dOperator.cpp:

Go to the source code of this file.

Functions

TosaSerializationBasicBlock * ConvertTransposeConv2dToTosaOperator (const Layer *layer, const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const TransposeConvolution2dDescriptor *descriptor)
 

Function Documentation

◆ ConvertTransposeConv2dToTosaOperator()

TosaSerializationBasicBlock* ConvertTransposeConv2dToTosaOperator ( const Layer *  layer,
const std::vector< const TensorInfo * > &  inputs,
const std::vector< const TensorInfo * > &  outputs,
const TransposeConvolution2dDescriptor *  descriptor 
)

Definition at line 10 of file TransposeConv2dOperator.cpp.

{
    std::string input0Name = std::string("input0_");
    std::string input1Name = std::string("constant_") + GetUniqueTosaMappingID();
    std::string input2Name = std::string("constant_") + GetUniqueTosaMappingID();
    std::string outputName = std::string("output0_");
    std::string blockName = std::string("Op_TRANSPOSE_CONV2D_block_") + GetUniqueTosaMappingID();

    // If a layer is present then the block will be used for execution, so input and output names need to be determined
    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
    if(layer != nullptr)
    {
        // Get the layers connected to the input slots and determine unique tensor names.
        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
        input0Name = GenerateUniqueName(connectedInputLayer, 0);

        // Determine unique output tensor name.
        outputName = GenerateUniqueOutputName(*layer, 0);
    }

    std::vector<TosaSerializationTensor*> tensors;
    std::vector<TosaSerializationOperator*> operators;

    // Setup input tensor.
    // Only add tensor if connected layer is an input layer,
    // as intermediate or constant tensors will be created separately.
    // There also can't be duplicate tensors.
    if(input0Name.find("input0_") != std::string::npos)
    {
        std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
        DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());

        tensors.push_back(new TosaSerializationTensor(input0Name, inputShape0, inputDType0, {}));
    }

    // Setup weights tensor, constant data will get copied during SetConstantTensorData.
    operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {input1Name}));

    // During validation the TensorInfo can be retrieved from the inputs.
    // During execution, it is only available through the layer so use m_Weight.
    if(layer == nullptr)
    {
        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());

        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, {}));
    }
    else
    {
        auto transposeConv2dLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(layer);

        std::vector<int32_t> inputShape1 = GetTosaTensorShape(
            transposeConv2dLayer->m_Weight->GetTensorInfo().GetShape());
        DType inputDType1 = ArmNNToDType(transposeConv2dLayer->m_Weight->GetTensorInfo().GetDataType());

        std::vector<uint8_t> uint8Data = ConvertConstantTensorDataToBuffer(transposeConv2dLayer->m_Weight);
        tensors.push_back(new TosaSerializationTensor(input1Name, inputShape1, inputDType1, uint8Data));
    }

    // Setup bias operator and tensor, constant data will get copied during SetConstantTensorData.
    operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {input2Name}));

    // During validation the TensorInfo can be retrieved from the inputs.
    // During execution, it is only available through the layer so use m_Bias.
    if(layer == nullptr && descriptor->m_BiasEnabled)
    {
        std::vector<int32_t> inputShape2 = GetTosaTensorShape(inputs[2]->GetShape());
        DType inputDType2 = ArmNNToDType(inputs[2]->GetDataType());

        tensors.push_back(new TosaSerializationTensor(input2Name, inputShape2, inputDType2, {}));
    }
    else if(descriptor->m_BiasEnabled)
    {
        auto transposeConv2dLayer = PolymorphicDowncast<const TransposeConvolution2dLayer*>(layer);

        std::vector<int32_t> inputShape2 = GetTosaTensorShape(
            transposeConv2dLayer->m_Bias->GetTensorInfo().GetShape());
        DType inputDType2 = ArmNNToDType(transposeConv2dLayer->m_Bias->GetTensorInfo().GetDataType());

        std::vector<uint8_t> uint8Data = ConvertConstantTensorDataToBuffer(transposeConv2dLayer->m_Bias);
        tensors.push_back(new TosaSerializationTensor(input2Name, inputShape2, inputDType2, uint8Data));
    }
    else
    {
        // If bias is disabled, create a constant bias tensor of 0's as three inputs are required.
        // The size of the bias must match the channels dimension, so get the correct index.
        unsigned int index = (descriptor->m_DataLayout == DataLayout::NHWC) ? 3 : 1;
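        // For example (shapes are illustrative, not from this file): an NHWC output of {1, H, W, C}
        // holds the channel count at index 3, while an NCHW output of {1, C, H, W} holds it at index 1.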

        std::vector<uint8_t> uint8Data;
        std::vector<float> data(outputs[0]->GetShape()[index], 0.0f);

        TosaSerializationHandler::ConvertF32toU8(data, uint8Data);

        tensors.push_back(new TosaSerializationTensor(input2Name,
                                                      {static_cast<int32_t>(outputs[0]->GetShape()[index])},
                                                      DType_FP32,
                                                      uint8Data));
    }

    // Setup output tensor.
    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());

    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));

    // Set up TRANSPOSE_CONV2D operator.
    // The TOSA Reference Model pads the output shape, so it is added to output shape.
    // In Arm NN we pad the input shape, so it is taken away.
    // To offset this the negative padding value can be used.
    std::vector<int> pad = {-static_cast<int>(descriptor->m_PadTop),
                            -static_cast<int>(descriptor->m_PadBottom),
                            -static_cast<int>(descriptor->m_PadLeft),
                            -static_cast<int>(descriptor->m_PadRight)};
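    // Worked example with assumed values (not taken from this file): for inputH = 4, strideY = 2,
    // kernelH = 3 and padTop = padBottom = 1, Arm NN produces outputH = (4 - 1) * 2 + 3 - 1 - 1 = 7,
    // while the TOSA reference model computes (4 - 1) * 2 + out_pad_top + out_pad_bottom + 3, so
    // passing out_pad = -1 on each side yields the same 7.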
    std::vector<int> stride = {static_cast<int>(descriptor->m_StrideY),
                               static_cast<int>(descriptor->m_StrideX)};

    std::vector<int> outputShape;
    // If available, use the shape in the descriptor, otherwise use the output shape.
    if (descriptor->m_OutputShape.size() == 4)
    {
        for (uint32_t i = 0; i < descriptor->m_OutputShape.size(); ++i)
        {
            outputShape.push_back(static_cast<int>(descriptor->m_OutputShape[i]));
        }
    }
    else
    {
        for (uint32_t i = 0; i < outputs[0]->GetNumDimensions(); ++i)
        {
            outputShape.push_back(static_cast<int>(outputs[0]->GetShape()[i]));
        }
    }

    TosaTransposeConvAttribute attribute(pad, stride, outputShape, 0, 0);

    auto* op = new TosaSerializationOperator(Op_TRANSPOSE_CONV2D,
                                             Attribute_TransposeConvAttribute,
                                             &attribute,
                                             {input0Name, input1Name, input2Name},
                                             {outputName});
    operators.push_back(op);

    // operatorInputNames/operatorOutputNames ends up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
    return new TosaSerializationBasicBlock(blockName,                             // name
                                           mainName,                              // region name
                                           operators,                             // operators
                                           tensors,                               // tensors
                                           {input0Name, input1Name, input2Name},  // inputs
                                           {outputName});                         // outputs
}

References GenerateUniqueName(), GenerateUniqueOutputName(), InputSlot::GetConnectedOutputSlot(), Layer::GetInputSlot(), OutputSlot::GetOwningLayer(), GetTosaTensorShape(), and GetUniqueTosaMappingID().

Referenced by GetTosaMapping().
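
A minimal validation-mode usage sketch (not part of this file): the helper name BuildExampleTransposeConv2dBlock, the include path, and the tensor shapes below are assumptions for illustration; only the call signature, the descriptor fields, and the nullptr-layer behaviour are taken from the documentation above.

// Hypothetical helper; assumes "TransposeConv2dOperator.hpp" declares the converter and pulls in
// the Arm NN and TOSA serialization types used below.
#include "TransposeConv2dOperator.hpp"

using namespace armnn;

TosaSerializationBasicBlock* BuildExampleTransposeConv2dBlock()
{
    // Validation-mode call: layer == nullptr, so the default "input0_"/"output0_" tensor names are
    // used and the weights are read from inputs[1] rather than from a TransposeConvolution2dLayer.
    TransposeConvolution2dDescriptor descriptor;
    descriptor.m_StrideX     = 2;
    descriptor.m_StrideY     = 2;
    descriptor.m_PadLeft     = 0;
    descriptor.m_PadRight    = 0;
    descriptor.m_PadTop      = 0;
    descriptor.m_PadBottom   = 0;
    descriptor.m_BiasEnabled = false;              // a zero-filled bias tensor is created internally
    descriptor.m_DataLayout  = DataLayout::NHWC;

    // Illustrative shapes: NHWC 1x4x4x1 input, 3x3 kernel (exact weight layout assumed), stride 2
    // gives a 1x9x9x1 output: (4 - 1) * 2 + 3 = 9.
    TensorInfo inputInfo ({1, 4, 4, 1}, DataType::Float32);
    TensorInfo weightInfo({1, 3, 3, 1}, DataType::Float32);
    TensorInfo outputInfo({1, 9, 9, 1}, DataType::Float32);

    std::vector<const TensorInfo*> inputs  = {&inputInfo, &weightInfo};
    std::vector<const TensorInfo*> outputs = {&outputInfo};

    // The returned block contains the two Op_CONST operators (weights and bias) plus the
    // Op_TRANSPOSE_CONV2D operator, ready to be placed into a TOSA serialization region.
    return ConvertTransposeConv2dToTosaOperator(nullptr, inputs, outputs, &descriptor);
}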

armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1469
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1477
ConvertConstantTensorDataToBuffer
std::vector< uint8_t > ConvertConstantTensorDataToBuffer(const std::shared_ptr< ConstTensorHandle > &tensorHandle)
Definition: TosaOperatorUtils.hpp:289
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1475
armnn::Layer
Definition: Layer.hpp:230
mainName
const std::string mainName
Definition: TosaOperatorUtils.hpp:19
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:132
ArmNNToDType
DType ArmNNToDType(const DataType &type)
Definition: TosaOperatorUtils.hpp:22
GenerateUniqueOutputName
std::string GenerateUniqueOutputName(const Layer &layer, uint32_t layerSlot)
Definition: TosaOperatorUtils.hpp:82
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1479
armnn::TransposeConvolution2dDescriptor::m_OutputShape
std::vector< unsigned int > m_OutputShape
Definition: Descriptors.hpp:1486
GenerateUniqueName
std::string GenerateUniqueName(const Layer &layer, uint32_t layerSlot)
Definition: TosaOperatorUtils.hpp:63
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1473
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1471
GetTosaTensorShape
std::vector< int32_t > GetTosaTensorShape(const TensorShape &shape)
Definition: TosaOperatorUtils.hpp:52
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1481
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1483
GetUniqueTosaMappingID
std::string GetUniqueTosaMappingID()
Definition: TosaOperatorUtils.hpp:100