ArmNN 23.02
Layer.hpp
1 //
2 // Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "LayerFwd.hpp"
8 
10 #include <OutputHandler.hpp>
14 #include "InternalTypes.hpp"
16 #include "DllExport.hpp"
17 
18 #include <armnn/Types.hpp>
19 #include <armnn/Tensor.hpp>
20 #include <armnn/INetwork.hpp>
24 
25 #include <algorithm>
26 #include <functional>
27 #include <iostream>
28 #include <list>
29 #include <memory>
30 #include <string>
31 #include <vector>
33 
34 namespace armnn
35 {
36 
37 class IWorkload;
38 class IWorkloadFactory;
39 class Layer;
40 class Graph;
41 
42 class InputSlot final : public IInputSlot
43 {
44 public:
45  explicit InputSlot(Layer& owner, unsigned int slotIndex)
46  : m_OwningLayer(owner)
47  , m_Connection(nullptr)
48  , m_SlotIndex(slotIndex)
49  {}
50 
51  ~InputSlot();
52 
53  Layer& GetOwningLayer() const { return m_OwningLayer; }
54  unsigned int GetSlotIndex() const override { return m_SlotIndex; }
55 
56  const OutputSlot* GetConnectedOutputSlot() const { return m_Connection; }
57  OutputSlot* GetConnectedOutputSlot() { return m_Connection; }
58 
59  const IConnectableLayer& GetOwningIConnectableLayer() const override;
61 
62  /// Links the slot to an output slot or breaks an existing link if passing nullptr.
63  void SetConnection(OutputSlot* source)
64  {
65  if (m_Connection != nullptr && source != nullptr)
66  {
67  throw InvalidArgumentException("Tried to connect an output slot to an input slot, "
68  "but the latter already has a connection");
69  }
70  m_Connection = source;
71  }
72 
73  // Inserts an existing single-output layer at this point in the graph.
74  void Insert(Layer& layer);
75 
76  // InputSlot
77 
78  const IOutputSlot* GetConnection() const override;
79  IOutputSlot* GetConnection() override;
80 
81 private:
82  Layer& m_OwningLayer;
83  OutputSlot* m_Connection;
84  const unsigned int m_SlotIndex;
85 };
86 
87 class OutputSlot final : public IOutputSlot
88 {
89 public:
90  explicit OutputSlot(Layer& owner, OutputHandler& outputHandler)
91  : m_OwningLayer(owner)
92  , m_OutputHandler(outputHandler)
93  , m_TensorHandleFactoryId(ITensorHandleFactory::LegacyFactoryId)
94  {}
95 
96  OutputSlot(const OutputSlot&) = delete;
97  OutputSlot& operator=(const OutputSlot&) = delete;
98  OutputSlot& operator=(OutputSlot&&) = delete;
99 
100  OutputSlot(OutputSlot&&) = default;
101 
102  ~OutputSlot()
103  {
104  try
105  {
106  // Coverity fix: DisconnectAll() may throw uncaught exceptions.
107  DisconnectAll();
108  }
109  catch (const std::exception& e)
110  {
111  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
112  // exception of type std::length_error.
113  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
114  std::cerr << "WARNING: An error has occurred when disconnecting all output slots: "
115  << e.what() << std::endl;
116  }
117  }
118 
119  Layer& GetOwningLayer() const { return m_OwningLayer; }
120 
121  const IConnectableLayer& GetOwningIConnectableLayer() const override;
123 
124  LayerGuid GetOwningLayerGuid() const override;
125 
126  const OutputHandler& GetOutputHandler() const { return m_OutputHandler; }
127  OutputHandler& GetOutputHandler() { return m_OutputHandler; }
128 
129  int Connect(InputSlot& destination);
130  void Disconnect(InputSlot& slot);
131 
132  const std::vector<InputSlot*>& GetConnections() const { return m_Connections; }
133  const std::vector<EdgeStrategy>& GetEdgeStrategies() const { return m_EdgeStrategies; }
134 
135  bool ValidateTensorShape(const TensorShape& shape) const;
136 
137  // Disconnect all connections.
138  void DisconnectAll();
139 
140  /// Moves all connections to another OutputSlot.
141  void MoveAllConnections(OutputSlot& destination);
142 
143  // IOutputSlot
144 
145  unsigned int GetNumConnections() const override { return armnn::numeric_cast<unsigned int>(m_Connections.size()); }
146  const InputSlot* GetConnection(unsigned int index) const override;
147  InputSlot* GetConnection(unsigned int index) override;
148 
149  void SetTensorInfo(const TensorInfo& tensorInfo) override;
150  const TensorInfo& GetTensorInfo() const override;
151  bool IsTensorInfoSet() const override;
152 
153  int Connect(IInputSlot& destination) override
154  {
155  return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
156  }
157 
158  void Disconnect(IInputSlot& slot) override
159  {
160  return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));
161  }
162 
163  unsigned int CalculateIndexOnOwner() const override;
164 
165  bool operator==(const OutputSlot& other) const;
166 
167  void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId& id);
168  ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const;
169 
170  void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy);
171  EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const;
172 
173 private:
174  void ValidateConnectionIndex(unsigned int index) const;
175 
176  Layer& m_OwningLayer;
177  OutputHandler& m_OutputHandler;
178  std::vector<InputSlot*> m_Connections;
179 
180  ITensorHandleFactory::FactoryId m_TensorHandleFactoryId;
181  std::vector<EdgeStrategy> m_EdgeStrategies;
182 };
183 
184 // InputSlot inlines that need OutputSlot declaration.
185 
186 inline InputSlot::~InputSlot()
187 {
188  if (m_Connection != nullptr)
189  {
190  try
191  {
192  // Coverity fix: Disconnect() may throw uncaught exceptions.
193  m_Connection->Disconnect(*this);
194  }
195  catch (const std::exception& e)
196  {
197  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
198  // exception of type std::length_error.
199  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
200  std::cerr << "WARNING: An error has occurred when disconnecting an input slot: "
201  << e.what() << std::endl;
202  }
203  }
204 }
205 
206 inline const IOutputSlot* InputSlot::GetConnection() const { return GetConnectedOutputSlot(); }
207 inline IOutputSlot* InputSlot::GetConnection() { return GetConnectedOutputSlot(); }
208 
209 
210 class ScopedTensorHandle;
211 
212 // Base layer class
213 
214 using LayerPriority = unsigned int;
215 using AdditionalInfoObjectPtr = std::shared_ptr<void>;
216 
217 class Layer : public IConnectableLayer
218 {
219 public:
220  /// @param name - Optional name for the layer (may be nullptr).
221  Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
222  Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name);
223 
224  void ExecuteStrategy(IStrategy& strategy) const override;
225 
226 
227  const std::string& GetNameStr() const
228  {
229  return m_LayerName;
230  }
231 
232  const OutputHandler& GetOutputHandler(unsigned int i = 0) const
233  {
234  return m_OutputHandlers[i];
235  }
236 
237  OutputHandler& GetOutputHandler(unsigned int i = 0)
238  {
239  return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
240  }
241 
242  ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; };
243  bool GetAllowExpandedDims() const { return m_AllowExpandedDims; };
244 
245  const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
246  const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
247 
248  // Allows non-const access to input slots, but doesn't expose the vector (vector size is fixed at layer construction).
249  std::vector<InputSlot>::iterator BeginInputSlots() { return m_InputSlots.begin(); }
250  std::vector<InputSlot>::iterator EndInputSlots() { return m_InputSlots.end(); }
251 
252  // Allows non-const access to output slots, but doesn't expose the vector (vector size is fixed at layer construction).
253  std::vector<OutputSlot>::iterator BeginOutputSlots() { return m_OutputSlots.begin(); }
254  std::vector<OutputSlot>::iterator EndOutputSlots() { return m_OutputSlots.end(); }
255 
256  // Checks whether the outputs of this layer don't have any connection.
257  bool IsOutputUnconnected()
258  {
259  unsigned int numConnections = 0;
260 
261  for (auto&& output : GetOutputSlots())
262  {
263  numConnections += output.GetNumConnections();
264  }
265 
266  return (GetNumOutputSlots() > 0) && (numConnections == 0);
267  }
268 
269  // Used for sorting.
270  void ResetPriority() const;
271  LayerPriority GetPriority() const;
272 
273  LayerType GetType() const override { return m_Type; }
274 
275  DataType GetDataType() const;
276 
277  const BackendId& GetBackendId() const { return m_BackendId; }
278  void SetBackendId(const BackendId& id) override { m_BackendId = id; }
279 
280  // Virtuals
281 
282  virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const = 0;
283 
284  virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
285  const IWorkloadFactory& factory,
286  const bool IsMemoryManaged = true);
287 
288  /// Creates a dynamically-allocated copy of this layer.
289  /// @param graph - The Graph into which this Layer is being cloned.
290  virtual Layer* Clone(Graph& graph) const = 0;
291 
292  void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
293 
294  virtual void ValidateTensorShapesFromInputs() = 0;
295 
296  std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
297 
298  /// Helper to serialize the layer parameters to string.
299  /// (currently used in DotSerializer and company).
300  virtual void SerializeLayerParameters(ParameterStringifyFunction& fn) const;
301 
302  // Free up the constant source data
303  virtual void ReleaseConstantData();
304 
305  template<typename Op>
306  void OperateOnConstantTensors(Op op)
307  {
308  for (auto constant : GetConstantTensorsByRef())
309  {
310  if (constant.get())
311  {
312  op(constant);
313  }
314  }
315  };
316 
317  // IConnectableLayer
318 
319  const char* GetName() const override { return m_LayerName.c_str(); }
320 
321  unsigned int GetNumInputSlots() const override { return static_cast<unsigned int>(m_InputSlots.size()); }
322  unsigned int GetNumOutputSlots() const override { return static_cast<unsigned int>(m_OutputSlots.size()); }
323 
324  const InputSlot& GetInputSlot(unsigned int index) const override { return m_InputSlots.at(index); }
325  InputSlot& GetInputSlot(unsigned int index) override { return m_InputSlots.at(index); }
326  const OutputSlot& GetOutputSlot(unsigned int index = 0) const override { return m_OutputSlots.at(index); }
327  OutputSlot& GetOutputSlot(unsigned int index = 0) override { return m_OutputSlots.at(index); }
328 
329  void SetGuid(LayerGuid guid) { m_Guid = guid; }
330  LayerGuid GetGuid() const final { return m_Guid; }
331 
332  void AddRelatedLayerName(const std::string layerName) { m_RelatedLayerNames.emplace_back(layerName); }
333 
334  const std::list<std::string>& GetRelatedLayerNames() { return m_RelatedLayerNames; }
335 
336  virtual void Reparent(Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
337 
337 
338  void BackendSelectionHint(Optional<BackendId> backend) final
339  {
340  m_BackendHint = backend;
341  }
342  Optional<BackendId> GetBackendHint() const { return m_BackendHint; }
343 
344  void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
345  {
346  m_ShapeInferenceMethod = shapeInferenceMethod;
347  }
348 
349  void SetAllowExpandedDims(bool allowExpandedDims)
350  {
351  m_AllowExpandedDims = allowExpandedDims;
352  }
353 
354  template<typename T>
355  std::shared_ptr<T> GetAdditionalInformation() const
356  {
357  return std::static_pointer_cast<T>(m_AdditionalInfoObject);
358  }
359 
360  void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr& additionalInfo)
361  {
362  m_AdditionalInfoObject = additionalInfo;
363  }
364 
365  virtual const BaseDescriptor& GetParameters() const override { return m_NullDescriptor; }
366 
367 protected:
368  // Graph needs access to the virtual destructor.
369  friend class Graph;
370  virtual ~Layer() = default;
371 
372  template <typename QueueDescriptor>
373  void CollectQueueDescriptorInputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
374  {
375  WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
376  CollectWorkloadInputs(dataCollector);
377  }
378 
379  template <typename QueueDescriptor>
380  void CollectQueueDescriptorOutputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
381  {
382  WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
383  CollectWorkloadOutputs(dataCollector);
384  }
385 
386  void ValidateAndCopyShape(const TensorShape& outputShape,
387  const TensorShape& inferredShape,
388  const ShapeInferenceMethod shapeInferenceMethod,
389  const std::string& layerName,
390  const unsigned int outputSlotIndex = 0);
391 
392  void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod);
393 
394  /// Helper function to reduce duplication in *Layer::CreateWorkload.
395  template <typename QueueDescriptor>
396  WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const
397  {
398  WorkloadInfo info;
399  CollectQueueDescriptorInputs(descriptor, info);
400  CollectQueueDescriptorOutputs(descriptor, info);
401  return info;
402  }
403 
404  template <typename LayerType, typename ... Params>
405  LayerType* CloneBase(Graph& graph, Params&& ... params) const;
406 
407  // Retrieves the handles to the constant tensors.
408  // Marking this as override and implementing it here keeps IConnectableLayer abstract with only pure virtual functions.
409  virtual ConstantTensors GetConstantTensorsByRef() override {return ConstantTensors(); };
410 
411  // "Blob"
412  AdditionalInfoObjectPtr m_AdditionalInfoObject;
413 
414  // Utility method to set a pointer in the queueDescriptor to the "blob" location in the layer
415  void SetAdditionalInfo(QueueDescriptor& descriptor) const;
416 
417 private:
418  void CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const;
419  void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const;
420 
421 protected:
422  std::vector<OutputHandler> m_OutputHandlers;
423  ShapeInferenceMethod m_ShapeInferenceMethod;
424 
425 private:
426  const std::string m_LayerName;
427 
428  std::vector<InputSlot> m_InputSlots;
429  std::vector<OutputSlot> m_OutputSlots;
430 
431  const LayerType m_Type;
432  BackendId m_BackendId;
433  Optional<BackendId> m_BackendHint;
434 
435  /// Used for sorting.
436  mutable LayerPriority m_Priority = 0;
437  mutable bool m_Visiting = false;
438 
439  bool m_AllowExpandedDims = false;
440 
441  LayerGuid m_Guid;
442 
443  std::list<std::string> m_RelatedLayerNames;
444 
445  /// Returned by layers which have no parameters associated with them.
446  /// Has to be a member as it is returned as a const reference;
447  /// declared static so that there is only ever one instance in memory.
448  ARMNN_DLLEXPORT static NullDescriptor m_NullDescriptor;
449 };
450 
451 // A layer user-provided data can be bound to (e.g. inputs, outputs).
452 class BindableLayer : public Layer
453 {
454 public:
455  BindableLayer(unsigned int numInputSlots,
456  unsigned int numOutputSlots,
457  LayerType type,
458  const char* name,
459  LayerBindingId id)
460  : Layer(numInputSlots, numOutputSlots, type, name)
461  , m_Id(id)
462  {
463  }
464 
465  LayerBindingId GetBindingId() const { return m_Id; };
466 
467  void ExecuteStrategy(IStrategy& strategy) const override
468  {
469  strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName(), GetBindingId());
470  }
471 
472 protected:
473  ~BindableLayer() = default;
474 
475 private:
476  LayerBindingId m_Id;
477 };
478 
479 } //namespace armnn
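
The InputSlot and OutputSlot classes above are the graph-side implementations of the IInputSlot and IOutputSlot interfaces declared in INetwork.hpp. Below is a minimal usage sketch (not part of Layer.hpp) showing how those interfaces are normally exercised through the public INetwork API; the choice of layers and the tensor shape are illustrative only.

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    IConnectableLayer* softmax = network->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    // IOutputSlot::Connect() resolves to OutputSlot::Connect(InputSlot&), which records the
    // edge in m_Connections and calls InputSlot::SetConnection() on the destination slot.
    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // SetTensorInfo() forwards to the OutputHandler owned by the producing layer.
    TensorInfo info(TensorShape({1, 10}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    softmax->GetOutputSlot(0).SetTensorInfo(info);

    return 0;
}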
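
PrepInfoAndDesc() and SetAdditionalInfo() exist to cut boilerplate from the CreateWorkload() overrides of concrete layers. The fragment below sketches the usual pattern, modeled on how ActivationLayer implements it; treat the specific layer, descriptor, and factory call as illustrative rather than authoritative.

// Sketch of a typical Layer::CreateWorkload() override (modeled on ActivationLayer).
std::unique_ptr<IWorkload> ActivationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    ActivationQueueDescriptor descriptor;

    // Points the descriptor at this layer's additional-info "blob" (m_AdditionalInfoObject), if any.
    SetAdditionalInfo(descriptor);

    // PrepInfoAndDesc() fills descriptor.m_Inputs/m_Outputs and the matching WorkloadInfo
    // tensor infos from this layer's input slots and output handlers.
    return factory.CreateWorkload(LayerType::Activation, descriptor, PrepInfoAndDesc(descriptor));
}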