ArmNN 23.11
SubgraphUtils.hpp
1 //
2 // Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <armnn/StrategyBase.hpp>
9 #include <armnn/Descriptors.hpp>
10 #include <optimizations/FoldPadIntoLayer2d.hpp>
11 
12 namespace armnn
13 {
14 
15 namespace
16 {
17 
18 /// Checks if a Layer has a DataLayout that is either NCHW or NCDHW.
19 class CheckForNCHW : public StrategyBase<NoThrowStrategy>
20 {
21 public:
22  CheckForNCHW()
23  {}
24 
25  void ExecuteStrategy(const armnn::IConnectableLayer* layer,
26  const armnn::BaseDescriptor& descriptor,
27  const std::vector<armnn::ConstTensor>& constants,
28  const char* name,
29  const armnn::LayerBindingId id = 0) override
30  {
31  armnn::IgnoreUnused(layer, constants, id, name);
32  switch (layer->GetType())
33  {
34  case armnn::LayerType::BatchMatMul:
35  {
36  auto desc = static_cast<const armnn::BatchMatMulDescriptor&>(descriptor);
37  m_Result = desc.m_DataLayoutX == DataLayout::NCHW || desc.m_DataLayoutY == DataLayout::NCHW;
38  break;
39  }
40  case armnn::LayerType::BatchNormalization:
41  {
42  CheckDescForNCHW(static_cast<const armnn::BatchNormalizationDescriptor&>(descriptor));
43  break;
44  }
45  case armnn::LayerType::BatchToSpaceNd:
46  {
47  CheckDescForNCHW(static_cast<const armnn::BatchToSpaceNdDescriptor&>(descriptor));
48  break;
49  }
50  case armnn::LayerType::Convolution2d:
51  {
52  CheckDescForNCHW(static_cast<const armnn::Convolution2dDescriptor&>(descriptor));
53  break;
54  }
55  case armnn::LayerType::Convolution3d:
56  {
57  CheckDescForNCHW(static_cast<const armnn::Convolution3dDescriptor&>(descriptor));
58  break;
59  }
60  case armnn::LayerType::DepthwiseConvolution2d:
61  {
62  CheckDescForNCHW(static_cast<const armnn::DepthwiseConvolution2dDescriptor&>(descriptor));
63  break;
64  }
65  case armnn::LayerType::InstanceNormalization:
66  {
67  CheckDescForNCHW(static_cast<const armnn::InstanceNormalizationDescriptor&>(descriptor));
68  break;
69  }
70  case armnn::LayerType::L2Normalization:
71  {
72  CheckDescForNCHW(static_cast<const armnn::L2NormalizationDescriptor&>(descriptor));
73  break;
74  }
75  case armnn::LayerType::Normalization:
76  {
77  CheckDescForNCHW(static_cast<const armnn::NormalizationDescriptor&>(descriptor));
78  break;
79  }
80  case armnn::LayerType::Pooling2d:
81  {
82  CheckDescForNCHW(static_cast<const armnn::Pooling2dDescriptor&>(descriptor));
83  break;
84  }
85  case armnn::LayerType::Pooling3d:
86  {
87  CheckDescForNCHW(static_cast<const armnn::Pooling3dDescriptor&>(descriptor));
88  break;
89  }
90  case armnn::LayerType::SpaceToBatchNd:
91  {
92  CheckDescForNCHW(static_cast<const armnn::SpaceToBatchNdDescriptor&>(descriptor));
93  break;
94  }
95  case armnn::LayerType::SpaceToDepth:
96  {
97  CheckDescForNCHW(static_cast<const armnn::SpaceToDepthDescriptor&>(descriptor));
98  break;
99  }
100  case armnn::LayerType::StridedSlice:
101  {
102  CheckDescForNCHW(static_cast<const armnn::StridedSliceDescriptor&>(descriptor));
103  break;
104  }
105  default:
106  {
107  m_Result = false;
108  }
109  }
110  }
111 
112  /// Returns true if the Layer had a DataLayout and it was NCHW or NCDHW.
113  /// Returns false if the Layer either doesn't have a DataLayout or if it
114  /// had a DataLayout that was neither NCHW nor NCDHW.
115  bool Result()
116  {
117  return m_Result;
118  }
119 
120 private:
121  template<typename Descriptor>
122  void CheckDescForNCHW(const Descriptor& descriptor)
123  {
124  m_Result = (descriptor.m_DataLayout == DataLayout::NCHW) || (descriptor.m_DataLayout == DataLayout::NCDHW);
125  }
126 
127  bool m_Result = false;
128 };
129 
130 //
131 // This helper only works if the layers that the inputs connect to are not selected
132 //
133 
134 SubgraphView::IInputSlots CreateIInputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
135 {
136  SubgraphView::IInputSlots result;
137  for (auto&& layer : layers)
138  {
139  for (unsigned int i = 0 ; i < layer->GetNumInputSlots(); ++i)
140  {
141  result.push_back(&(layer->GetInputSlot(i)));
142  }
143  }
144  return result;
145 }
146 
147 //
148 // This helper only works if the layers that the outputs connect to are not selected
149 //
150 
151 SubgraphView::IOutputSlots CreateIOutputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
152 {
153  SubgraphView::IOutputSlots result;
154  for (auto &&layer: layers)
155  {
156  for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
157  {
158  result.push_back(&(layer->GetOutputSlot(i)));
159  }
160  }
161  return result;
162 }
163 
164 // Type used to hold the slot numbers to create the lists from. There should
165 // be a SlotList for each layer in the layers list
166 typedef std::vector<int> SlotList;
167 
168 template<typename ILayerType>
169 SubgraphView::IInputSlots CreateIInputsFromSlotLists(const std::vector<ILayerType*>& layers,
170  const std::vector<SlotList>& layersSlotLists)
171 {
172  ARMNN_THROW_INVALIDARG_IF_FALSE(layersSlotLists.size() == layers.size());
173 
174  SubgraphView::IInputSlots result;
175 
176  for (unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
177  {
178  const SlotList& slotList = layersSlotLists[layerIdx];
179  for (unsigned int slotIdx = 0 ; slotIdx < layers[layerIdx]->GetNumInputSlots(); ++slotIdx)
180  {
181  if (std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end())
182  {
183  result.push_back(&(layers[layerIdx]->GetInputSlot(slotIdx)));
184  }
185  }
186  }
187  return result;
188 }
189 
190 template<typename ILayerType>
191 SubgraphView::IOutputSlots CreateIOutputsFromSlotLists(const std::vector<ILayerType*>& layers,
192  const std::vector<SlotList>& layersSlotLists)
193 {
194  ARMNN_THROW_INVALIDARG_IF_FALSE(layersSlotLists.size() == layers.size());
195 
196  SubgraphView::IOutputSlots result;
197  for (unsigned int layerIdx = 0; layerIdx < layers.size(); ++layerIdx)
198  {
199  const SlotList& slotList = layersSlotLists[layerIdx];
200  for (unsigned int slotIdx = 0; slotIdx < layers[layerIdx]->GetNumOutputSlots(); ++slotIdx)
201  {
202  bool foundIt = std::find(slotList.begin(), slotList.end(), slotIdx) != slotList.end();
203  if (foundIt)
204  {
205  result.push_back(&(layers[layerIdx]->GetOutputSlot(slotIdx)));
206  }
207  }
208  }
209  return result;
210 }
211 }
212 
213 inline bool IsNCHW(armnn::Layer& layer)
214 {
215  CheckForNCHW check;
216  layer.ExecuteStrategy(check);
217  return check.Result();
218 }
219 
220 inline void ReportUntouchedLayers(OptimizationViews& optimizationViews, std::map<LayerGuid, Layer*> untouched)
221 {
222  std::vector<Layer*> untouchedVector;
223  for (const auto& pair : untouched)
224  {
225  Layer* layer = pair.second;
226  SubgraphView subgraphView({layer},
227  CreateIInputsFrom({layer}),
228  CreateIOutputsFrom({layer}));
229  optimizationViews.AddUntouchedSubgraph(std::move(subgraphView));
230  }
231 }
232 
233 template<typename LayerType>
234 LayerType* FoldPadLayer(OptimizationViews& optimizationViews,
235  LayerType* baseLayer,
236  LayerType* replacementLayer,
237  PadLayer* padLayer)
238 {
239  SubgraphView substitutionSubgraph({padLayer, baseLayer},
240  CreateIInputsFrom({padLayer}),
241  CreateIOutputsFrom({baseLayer}));
242  SubgraphView replacementSubgraph(replacementLayer);
243 
244  optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
245 
246  return replacementLayer;
247 }
248 
249 /// Checks if the Layer is connected to any Layer that has an NCHW layout.
250 inline bool ConnectedToLayerWithNCHW(Layer* baseLayer)
251 {
252  Layer& parentLayer = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
253 
254  if (IsNCHW(parentLayer))
255  {
256  return true;
257  }
258  for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
259  {
260  Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();
261  if (IsNCHW(nextLayer))
262  {
263  return true;
264  }
265  }
266  return false;
267 }
268 
269 /// Checks the Layer's Connections to see if it's connected to a Layer with the provided layerType. If dimSize is
270 /// provided, it will also check whether the connecting Tensor has more than that number of dimensions
271 inline bool ConnectedToLayerType(Layer* baseLayer, LayerType layerType, unsigned int dimSize = 0)
272 {
273  Layer& parentLayer = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
274  TensorInfo parentTensorInfo = baseLayer->GetInputSlot(0).GetTensorInfo();
275 
276  if (parentTensorInfo.GetNumDimensions() > dimSize && parentLayer.GetType() == layerType)
277  {
278  return true;
279  }
280  for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
281  {
282  Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();
283  TensorInfo nextTensorInfo = baseLayer->GetOutputSlot(0).GetConnection(i)->GetTensorInfo();
284 
285  if (nextTensorInfo.GetNumDimensions() > dimSize && nextLayer.GetType() == layerType)
286  {
287  return true;
288  }
289  }
290  return false;
291 }
292 
293 inline void RemoveReshapeLayer(ReshapeLayer* baseLayer,
294  std::map<LayerGuid, Layer*>& untouched,
295  OptimizationViews& optimizationViews)
296 {
297  if (baseLayer == nullptr)
298  {
299  return;
300  }
301  ReshapeDescriptor reshapeDescriptor = baseLayer->GetParameters();
302  Layer& parentLayer = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
303 
304  // Cannot currently remove the Reshape if it's connected to an Input, Constant or Splitter
305  if (parentLayer.GetType() == LayerType::Input || parentLayer.GetType() == LayerType::Constant)
306  {
307  return;
308  }
309 
310  // Cannot currently remove the Reshape if it's connected to an OutputSlot or Concat
311  for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
312  {
313  Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();
314 
315  if (nextLayer.GetType() == LayerType::Output)
316  {
317  return;
318  }
319  }
320  auto it = untouched.find(baseLayer->GetGuid());
321  if (it == untouched.end())
322  {
323  // Already removed from map
324  return;
325  }
326  untouched.erase(it);
327 
328  // Override the InputSlot TensorInfos for all the layers connected to the Reshape's OutputSlot
329  for (unsigned int i = 0; i < baseLayer->GetOutputSlot(0).GetNumConnections(); ++i)
330  {
331  Layer& nextLayer = baseLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer();
332  auto inputIndex = baseLayer->GetOutputSlot(0).GetConnection(i)->GetSlotIndex();
333  TensorInfo reshapeInfo(baseLayer->GetOutputSlot(0).GetTensorInfo());
334  reshapeInfo.SetShape(reshapeDescriptor.m_TargetShape);
335  nextLayer.GetInputSlot(inputIndex).SetTensorInfo(reshapeInfo);
336  }
337  optimizationViews.AddDeletedSubgraph(baseLayer);
338 }
339 
340 template<typename LayerType>
341 LayerType* FoldPadIntoAveragePool2d(OptimizationViews& optimizationViews,
342  Pooling2dLayer* baseLayer,
343  Pooling2dDescriptor& poolDescriptor,
344  PadLayer* padLayer)
345 {
346  IConnectableLayer* replacement =
347  optimizationViews.GetINetwork()->AddPooling2dLayer(poolDescriptor, "folded-pad-into-pool2d");
348  LayerType* replacementLayer = PolymorphicDowncast<LayerType*>(replacement);
349 
350  FoldPadLayer(optimizationViews,
351  baseLayer,
352  replacementLayer,
353  padLayer);
354 
355  return replacementLayer;
356 }
357 
358 //
359 // Layer sequence detection such as add + mul + add ( + optional activation )
360 //
361 
362 inline bool IsSequenceLayerType(Layer& layer, LayerType type)
363 {
364  return layer.GetType() == type;
365 }
366 
367 inline bool IsSequenceLayerType(Layer& layer, BinaryOperation type)
368 {
369  return (layer.GetType() == LayerType::ElementwiseBinary) &&
370  (PolymorphicDowncast<ElementwiseBinaryLayer*>(&layer)->GetParameters().m_Operation == type);
371 }
372 
373 // Detect a layer sequence and activation if specified. The activation must be at the end of the sequence.
374 template<typename TYPE>
375 bool IsLayerSequence(Layer& currentLayer,
376  TYPE first,
377  TYPE second,
378  TYPE third,
379  Layer* layerList[4],
380  bool handleValidActivates,
381  const std::vector<ActivationFunction>& validActivates)
382 {
383  auto PreviousLayer = [](Layer& layer)
384  {
385  return &layer.GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
386  };
387 
388  auto NextLayer = [](Layer& layer)
389  {
390  return &layer.GetOutputSlot(0).GetConnection(0)->GetOwningLayer();
391  };
392 
393  auto LayerIncomingConnectionDataType = [](Layer& layer)
394  {
395  return layer.GetInputSlot(0).GetTensorInfo().GetDataType();
396  };
397 
398  bool result = false;
399 
400  // Match in reverse so there is only 1 connection to check
401  if (IsSequenceLayerType(currentLayer, third))
402  {
403  // Save DataType of third layer
404  DataType dataType = LayerIncomingConnectionDataType(currentLayer);
405 
406  // Save third layer
407  layerList[2] = &currentLayer;
408 
409  // Check the layers that precede this one for the requested grouping
410  Layer *prevLayer = PreviousLayer(currentLayer);
411  if (prevLayer && IsSequenceLayerType(*prevLayer, second))
412  {
413  bool dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
414  if (! dataTypesMatch)
415  {
416  return result;
417  }
418 
419  layerList[1] = prevLayer;
420  prevLayer = PreviousLayer(*prevLayer);
421  if (prevLayer && IsSequenceLayerType(*prevLayer, first))
422  {
423  dataTypesMatch = (dataType == LayerIncomingConnectionDataType(*prevLayer));
424  if (! dataTypesMatch)
425  {
426  return result;
427  }
428 
429  layerList[0] = prevLayer;
430 
431  // Detected the first 3 layers if we get to this point so now
432  // check to see if we have a valid activation. If there is no activation
433  // then the sequence still matches.
434  if (handleValidActivates)
435  {
436  Layer *nextLayer = NextLayer(currentLayer);
437  if (nextLayer)
438  {
439  if (nextLayer->GetType() == LayerType::Activation)
440  {
441  // This layer is an activation, so it must be a valid type for the sequence
442  ActivationFunction activationFunction =
443  PolymorphicDowncast<ActivationLayer*>(nextLayer)->GetParameters().m_Function;
444  long count = std::count(validActivates.cbegin(),
445  validActivates.cend(),
446  activationFunction);
447  if (count > 0)
448  {
449  layerList[3] = nextLayer;
450  result = true;
451  }
452  }
453  else
454  {
455  // Next layer is not an activation so sequence still matches
456  result = true;
457  }
458  }
459  }
460  else
461  {
462  result = true;
463  }
464  }
465  }
466  }
467 
468  return result;
469 }
470 
471 } // namespace armnn
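
Usage note: the helpers in this header are intended to be called from a backend's OptimizeSubgraphView() implementation. The sketch below is a hypothetical illustration of that pattern, not code from SubgraphUtils.hpp or from any ArmNN backend; the function name OptimizeSubgraphExample, the include paths, and the choice of an Add -> Mul -> Add (+ optional ReLu/BoundedReLu) sequence are assumptions made for the example.

#include <map>
#include <vector>

#include <armnn/backends/OptimizationViews.hpp>
#include <armnn/backends/SubgraphView.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

// Assumed include path for this header; backends normally reach it through
// their own utility headers.
#include <backendsCommon/SubgraphUtils.hpp>

armnn::OptimizationViews OptimizeSubgraphExample(const armnn::SubgraphView& subgraph)
{
    using namespace armnn;
    OptimizationViews optimizationViews;

    // Record every layer in the subgraph; anything that is neither substituted
    // nor deleted below is reported back untouched at the end.
    std::map<LayerGuid, Layer*> untouched;
    for (auto it = subgraph.beginIConnectable(); it != subgraph.endIConnectable(); ++it)
    {
        Layer* layer = PolymorphicDowncast<Layer*>(*it);
        untouched.insert({layer->GetGuid(), layer});
    }

    for (auto it = subgraph.beginIConnectable(); it != subgraph.endIConnectable(); ++it)
    {
        Layer* base = PolymorphicDowncast<Layer*>(*it);

        // Remove a Reshape where RemoveReshapeLayer deems it safe; the helper
        // rewrites the consumers' input TensorInfos and records the deletion.
        if (base->GetType() == LayerType::Reshape)
        {
            RemoveReshapeLayer(PolymorphicDowncast<ReshapeLayer*>(base), untouched, optimizationViews);
            continue;
        }

        // Match an Add -> Mul -> Add sequence ending at this layer, optionally
        // followed by a ReLu or BoundedReLu activation. Matching runs backwards
        // from 'base', so only one producer connection is checked per step.
        Layer* layerList[4] = {nullptr, nullptr, nullptr, nullptr};
        const std::vector<ActivationFunction> validActivates = { ActivationFunction::ReLu,
                                                                 ActivationFunction::BoundedReLu };
        if (IsLayerSequence(*base,
                            BinaryOperation::Add, BinaryOperation::Mul, BinaryOperation::Add,
                            layerList, true, validActivates))
        {
            // A real backend would build its fused replacement layer(s) here and
            // register them with optimizationViews.AddSubstitution(...).
        }
    }

    ReportUntouchedLayers(optimizationViews, untouched);
    return optimizationViews;
}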