From 6f92c8e9f8bb38dcf5dccf8deeff5112ecd8e37c Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Wed, 22 Nov 2023 11:41:15 +0000
Subject: Update Doxygen for 23.11

Signed-off-by: Nikhil Raj
Change-Id: I47cd933f5002cb94a73aa97689d7b3d9c93cb849
---
 23.11/_avg_pool2_d_ignore_value_operator_8cpp.html | 302 +++++++++++++++++++++
 1 file changed, 302 insertions(+)
 create mode 100644 23.11/_avg_pool2_d_ignore_value_operator_8cpp.html

diff --git a/23.11/_avg_pool2_d_ignore_value_operator_8cpp.html b/23.11/_avg_pool2_d_ignore_value_operator_8cpp.html
new file mode 100644
index 0000000000..535fa85ac5
--- /dev/null
+++ b/23.11/_avg_pool2_d_ignore_value_operator_8cpp.html
@@ -0,0 +1,302 @@

Arm NN: src/backends/tosaCommon/operatorMappings/AvgPool2DIgnoreValueOperator.cpp File Reference
AvgPool2DIgnoreValueOperator.cpp File Reference
Include dependency graph for AvgPool2DIgnoreValueOperator.cpp: (dependency graph image not reproduced here)

Go to the source code of this file.

Functions

TosaSerializationBasicBlock * ConvertAvgPool2DIgnoreValueToTosaOperator (const Layer *layer, const std::vector< const TensorInfo * > &inputs, const std::vector< const TensorInfo * > &outputs, const Pooling2dDescriptor *poolDescriptor)

Function Documentation


◆ ConvertAvgPool2DIgnoreValueToTosaOperator()

TosaSerializationBasicBlock * ConvertAvgPool2DIgnoreValueToTosaOperator (const Layer * layer,
                                                                         const std::vector< const TensorInfo * > & inputs,
                                                                         const std::vector< const TensorInfo * > & outputs,
                                                                         const Pooling2dDescriptor * poolDescriptor)

Definition at line 8 of file AvgPool2DIgnoreValueOperator.cpp.

{
    std::string padInputName = std::string("input0_");
    std::string padOutputName = std::string("intermediate0_") + GetUniqueTosaMappingID();
    std::string poolOutputName = std::string("output0_");
    std::string blockName = std::string("Op_AVG_POOL2D_block_") + GetUniqueTosaMappingID();

    // If a layer is present then the block will be used for execution, so input and output names need to be determined
    // using the previous and following layers so the graph is connected correctly. For validation this doesn't matter.
    if(layer != nullptr)
    {
        // Get the layers connected to the input slots and determine unique tensor names.
        Layer& connectedInputLayer = layer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
        padInputName = GenerateUniqueName(connectedInputLayer, 0);

        // Determine unique output tensor name.
        poolOutputName = GenerateUniqueOutputName(*layer, 0);
    }

    std::vector<int> paddings;
    if (poolDescriptor->m_DataLayout == DataLayout::NHWC)
    {
        paddings = {0,
                    0,
                    static_cast<int>(poolDescriptor->m_PadTop),
                    static_cast<int>(poolDescriptor->m_PadBottom),
                    static_cast<int>(poolDescriptor->m_PadLeft),
                    static_cast<int>(poolDescriptor->m_PadRight),
                    0,
                    0
        };
    }
    else
    {
        paddings = {0,
                    0,
                    0,
                    0,
                    static_cast<int>(poolDescriptor->m_PadTop),
                    static_cast<int>(poolDescriptor->m_PadBottom),
                    static_cast<int>(poolDescriptor->m_PadLeft),
                    static_cast<int>(poolDescriptor->m_PadRight)
        };
    }

    TosaPadAttribute padAttribute(paddings, 0, 0.0f);
    auto* opPad = new TosaSerializationOperator(Op_PAD,
                                                Attribute_PadAttribute,
                                                &padAttribute,
                                                {padInputName},
                                                {padOutputName});

    std::vector<int> pad = {0, 0, 0, 0};
    std::vector<int> kernel = {static_cast<int>(poolDescriptor->m_PoolHeight),
                               static_cast<int>(poolDescriptor->m_PoolWidth)};
    std::vector<int> stride = {static_cast<int>(poolDescriptor->m_StrideY),
                               static_cast<int>(poolDescriptor->m_StrideX)};
    TosaPoolAttribute poolAttribute(pad, kernel, stride, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));

    auto* opPool = new TosaSerializationOperator(Op_AVG_POOL2D,
                                                 Attribute_PoolAttribute,
                                                 &poolAttribute,
                                                 {padOutputName},
                                                 {poolOutputName});

    std::vector<TosaSerializationTensor*> tensors;

    std::vector<int32_t> inputShape = GetTosaTensorShape(inputs[0]->GetShape());
    DType inputDType = ArmNNToDType(inputs[0]->GetDataType());

    // Only add input tensors if the connected layer is an input layer,
    // as intermediate or constant tensors will be created separately.
    // There also can't be duplicate tensors.
    if(padInputName.find("input0_") != std::string::npos)
    {
        tensors.push_back(new TosaSerializationTensor(padInputName, inputShape, inputDType, {}));
    }

    std::vector<int32_t> outputShape = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType = ArmNNToDType(outputs[0]->GetDataType());

    std::vector<int32_t> intermediateShape;
    if (poolDescriptor->m_DataLayout == DataLayout::NHWC)
    {
        intermediateShape = {inputShape[0],
                             inputShape[1] + paddings[2] + paddings[3],
                             inputShape[2] + paddings[4] + paddings[5],
                             inputShape[3]};
    }
    else
    {
        intermediateShape = {inputShape[0],
                             inputShape[1],
                             inputShape[2] + paddings[4] + paddings[5],
                             inputShape[3] + paddings[6] + paddings[7]};
    }

    tensors.push_back(new TosaSerializationTensor(padOutputName, intermediateShape, inputDType, {}));
    tensors.push_back(new TosaSerializationTensor(poolOutputName, outputShape, outputDType, {}));

    // operatorInputNames/operatorOutputNames ends up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings
    return new TosaSerializationBasicBlock(blockName,    // name
                                           mainName,     // region name
                                           {opPad, opPool}, // operators
                                           tensors,      // tensors
                                           {padInputName},  // inputs
                                           {poolOutputName}); // outputs
}

References ArmNNToDType(), GenerateUniqueName(), GenerateUniqueOutputName(), InputSlot::GetConnectedOutputSlot(), Layer::GetInputSlot(), OutputSlot::GetOwningLayer(), GetTosaTensorShape(), GetUniqueTosaMappingID(), Pooling2dDescriptor::m_DataLayout, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, and Pooling2dDescriptor::m_StrideY.


Referenced by GetTosaMapping().
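As an illustration of how this mapping can be exercised on its own, the sketch below builds a Pooling2dDescriptor by hand and calls the function with a null Layer pointer, which is the validation path mentioned in the source comments (the default "input0_"/"output0_" tensor names are used and no neighbouring layers are consulted). The header path, include list, descriptor values, tensor shapes and the omitted lifetime management are assumptions made for this example, not details taken from this page.

// Hypothetical, self-contained sketch (not part of the ArmNN sources documented here).
// It assumes the function declaration is available through the matching operator-mapping
// header and that the TOSA serialization types are pulled in by that header.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include "AvgPool2DIgnoreValueOperator.hpp"   // assumed header name

int main()
{
    using namespace armnn;

    // Average pooling with IgnoreValue padding: the mapping above expresses this as an
    // explicit PAD followed by an AVG_POOL2D that itself uses zero padding.
    Pooling2dDescriptor desc;
    desc.m_PoolType      = PoolingAlgorithm::Average;
    desc.m_PaddingMethod = PaddingMethod::IgnoreValue;
    desc.m_DataLayout    = DataLayout::NHWC;
    desc.m_PoolWidth     = 2;
    desc.m_PoolHeight    = 2;
    desc.m_StrideX       = 2;
    desc.m_StrideY       = 2;
    desc.m_PadLeft       = 1;
    desc.m_PadRight      = 1;
    desc.m_PadTop        = 1;
    desc.m_PadBottom     = 1;

    // Example shapes chosen to match the descriptor: a 1x4x4x1 NHWC input padded to 6x6,
    // then pooled with a 2x2 kernel at stride 2, gives a 1x3x3x1 output.
    const TensorInfo inputInfo({1, 4, 4, 1}, DataType::Float32);
    const TensorInfo outputInfo({1, 3, 3, 1}, DataType::Float32);

    // layer == nullptr selects the validation path described in the listing above.
    TosaSerializationBasicBlock* block =
        ConvertAvgPool2DIgnoreValueToTosaOperator(nullptr, {&inputInfo}, {&outputInfo}, &desc);

    // The returned block should contain the PAD and AVG_POOL2D operators plus the
    // intermediate tensor created by the mapping; lifetime management is omitted here.
    return block != nullptr ? 0 : 1;
}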

Referenced declarations:

    uint32_t m_PoolHeight - Pooling height value.
    uint32_t m_PoolWidth - Pooling width value.
    uint32_t m_StrideX - Stride value when proceeding through input for the width dimension.
    uint32_t m_StrideY - Stride value when proceeding through input for the height dimension.
    uint32_t m_PadTop - Padding top value in the height dimension.
    uint32_t m_PadBottom - Padding bottom value in the height dimension.
    uint32_t m_PadLeft - Padding left value in the width dimension.
    uint32_t m_PadRight - Padding right value in the width dimension.
    DataLayout m_DataLayout - The data layout to be used (NCHW, NHWC).
    const InputSlot & GetInputSlot(unsigned int index) const override - Get a const input slot handle by slot index. Definition: Layer.hpp:337
    const OutputSlot * GetConnectedOutputSlot() const - Definition: Layer.hpp:56
    Layer & GetOwningLayer() const - Definition: Layer.hpp:132
    const std::string mainName
    DType ArmNNToDType(const DataType &type)
    std::vector< int32_t > GetTosaTensorShape(const TensorShape &shape)
    std::string GenerateUniqueName(const Layer &layer, uint32_t layerSlot)
    std::string GenerateUniqueOutputName(const Layer &layer, uint32_t layerSlot)
    std::string GetUniqueTosaMappingID()
+ + + + -- cgit v1.2.1