ArmNN
 21.02
Layer.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
7 #include "LayerFwd.hpp"
8 
10 #include <OutputHandler.hpp>
14 #include "InternalTypes.hpp"
16 
17 #include <armnn/Types.hpp>
18 #include <armnn/Tensor.hpp>
19 #include <armnn/INetwork.hpp>
23 
24 #include <algorithm>
25 #include <functional>
26 #include <iostream>
27 #include <list>
28 #include <memory>
29 #include <string>
30 #include <vector>
32 
33 namespace armnn
34 {
35 
36 class IWorkload;
37 class IWorkloadFactory;
38 class Layer;
39 class Graph;
40 
41 class InputSlot final : public IInputSlot
42 {
43 public:
44  explicit InputSlot(Layer& owner, unsigned int slotIndex)
45  : m_OwningLayer(owner)
46  , m_Connection(nullptr)
47  , m_SlotIndex(slotIndex)
48  {}
49 
50  ~InputSlot();
51 
52  Layer& GetOwningLayer() const { return m_OwningLayer; }
53  unsigned int GetSlotIndex() const { return m_SlotIndex; }
54 
55  const OutputSlot* GetConnectedOutputSlot() const { return m_Connection; }
56  OutputSlot* GetConnectedOutputSlot() { return m_Connection; }
57 
58  /// Links the slot to an output slot or breaks an existing link if passing nullptr.
59  void SetConnection(OutputSlot* source)
60  {
61  if (m_Connection != nullptr && source != nullptr)
62  {
63  throw InvalidArgumentException("Tried to connect an output slot to an input slot, "
64  "but the latter already has a connection");
65  }
66  m_Connection = source;
67  }
68 
69  // Inserts single-output existing layer at this point in the graph.
70  void Insert(Layer& layer);
71 
72  // IInputSlot
73 
74  const IOutputSlot* GetConnection() const override;
75  IOutputSlot* GetConnection() override;
76 
77 private:
78  Layer& m_OwningLayer;
79  OutputSlot* m_Connection;
80  const unsigned int m_SlotIndex;
81 };
82 
83 class OutputSlot final : public IOutputSlot
84 {
85 public:
86  explicit OutputSlot(Layer& owner, OutputHandler& outputHandler)
87  : m_OwningLayer(owner)
88  , m_OutputHandler(outputHandler)
89  , m_TensorHandleFactoryId(ITensorHandleFactory::LegacyFactoryId)
90  {}
91 
92  OutputSlot(const OutputSlot&) = delete;
93  OutputSlot& operator=(const OutputSlot&) = delete;
94  OutputSlot& operator=(OutputSlot&&) = delete;
95 
96  OutputSlot(OutputSlot&&) = default;
97 
99  {
100  try
101  {
102  // Coverity fix: DisconnectAll() may throw uncaught exceptions.
103  DisconnectAll();
104  }
105  catch (const std::exception& e)
106  {
107  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
108  // exception of type std::length_error.
109  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
110  std::cerr << "WARNING: An error has occurred when disconnecting all output slots: "
111  << e.what() << std::endl;
112  }
113  }
114 
115  Layer& GetOwningLayer() const { return m_OwningLayer; }
116 
117  LayerGuid GetOwningLayerGuid() const override;
118 
119  const OutputHandler& GetOutputHandler() const { return m_OutputHandler; }
120  OutputHandler& GetOutputHandler() { return m_OutputHandler; }
121 
122  int Connect(InputSlot& destination);
123  void Disconnect(InputSlot& slot);
124 
125  const std::vector<InputSlot*>& GetConnections() const { return m_Connections; }
126  const std::vector<EdgeStrategy>& GetEdgeStrategies() const { return m_EdgeStrategies; }
127 
128  bool ValidateTensorShape(const TensorShape& shape) const;
129 
130  // Disconnect all conections.
131  void DisconnectAll();
132 
133  /// Moves all connections to another OutputSlot.
134  void MoveAllConnections(OutputSlot& destination);
135 
136  // IOutputSlot
137 
138  unsigned int GetNumConnections() const override { return armnn::numeric_cast<unsigned int>(m_Connections.size()); }
139  const InputSlot* GetConnection(unsigned int index) const override;
140  InputSlot* GetConnection(unsigned int index) override;
141 
142  void SetTensorInfo(const TensorInfo& tensorInfo) override;
143  const TensorInfo& GetTensorInfo() const override;
144  bool IsTensorInfoSet() const override;
145 
146  int Connect(IInputSlot& destination) override
147  {
148  return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
149  }
150 
151  void Disconnect(IInputSlot& slot) override
152  {
153  return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));
154  }
155 
156  unsigned int CalculateIndexOnOwner() const override;
157 
158  bool operator==(const OutputSlot& other) const;
159 
160  void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId& id);
161  ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const;
162 
163  void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy);
164  EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const;
165 
166 private:
167  void ValidateConnectionIndex(unsigned int index) const;
168 
169  Layer& m_OwningLayer;
170  OutputHandler& m_OutputHandler;
171  std::vector<InputSlot*> m_Connections;
172 
173  ITensorHandleFactory::FactoryId m_TensorHandleFactoryId;
174  std::vector<EdgeStrategy> m_EdgeStrategies;
175 };
176 
177 // InputSlot inlines that need OutputSlot declaration.
178 
180 {
181  if (m_Connection != nullptr)
182  {
183  try
184  {
185  // Coverity fix: Disconnect() may throw uncaught exceptions.
186  m_Connection->Disconnect(*this);
187  }
188  catch (const std::exception& e)
189  {
190  // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
191  // exception of type std::length_error.
192  // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
193  std::cerr << "WARNING: An error has occurred when disconnecting an input slot: "
194  << e.what() << std::endl;
195  }
196  }
197 }
198 
201 
202 
204 
// Base layer class

// Priority value used by Layer::ResetPriority/GetPriority for graph sorting.
using LayerPriority = unsigned int;
// Type-erased shared handle for a layer's additional ("blob") information.
using AdditionalInfoObjectPtr = std::shared_ptr<void>;
209 
210 class Layer : public IConnectableLayer
211 {
212 public:
213  /// @param name - Optional name for the layer (may be nullptr).
214  Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
215  Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name);
216 
217  void ExecuteStrategy(IStrategy& strategy) const override;
218 
219 
220  const std::string& GetNameStr() const
221  {
222  return m_LayerName;
223  }
224 
225  const OutputHandler& GetOutputHandler(unsigned int i = 0) const
226  {
227  return m_OutputHandlers[i];
228  }
229 
230  OutputHandler& GetOutputHandler(unsigned int i = 0)
231  {
232  return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
233  }
234 
235  ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; };
236 
237  const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
238  const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
239 
240  // Allows non-const access to input slots, but don't expose vector (vector size is fixed at layer construction).
241  std::vector<InputSlot>::iterator BeginInputSlots() { return m_InputSlots.begin(); }
242  std::vector<InputSlot>::iterator EndInputSlots() { return m_InputSlots.end(); }
243 
244  // Allows non-const access to output slots, but don't expose vector (vector size is fixed at layer construction).
245  std::vector<OutputSlot>::iterator BeginOutputSlots() { return m_OutputSlots.begin(); }
246  std::vector<OutputSlot>::iterator EndOutputSlots() { return m_OutputSlots.end(); }
247 
248  // Checks whether the outputs of this layer don't have any connection.
250  {
251  unsigned int numConnections = 0;
252 
253  for (auto&& output : GetOutputSlots())
254  {
255  numConnections += output.GetNumConnections();
256  }
257 
258  return (GetNumOutputSlots() > 0) && (numConnections == 0);
259  }
260 
261  // Used for sorting.
262  void ResetPriority() const;
263  LayerPriority GetPriority() const;
264 
265  LayerType GetType() const override { return m_Type; }
266 
267  DataType GetDataType() const;
268 
269  const BackendId& GetBackendId() const { return m_BackendId; }
270  void SetBackendId(const BackendId& id) { m_BackendId = id; }
271 
272  // Virtuals
273 
274  virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const = 0;
275 
276  virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
277  const IWorkloadFactory& factory,
278  const bool IsMemoryManaged = true);
279 
280  /// Creates a dynamically-allocated copy of this layer.
281  /// @param graph - The Graph into which this Layer is being cloned.
282  virtual Layer* Clone(Graph& graph) const = 0;
283 
284  void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
285 
286  virtual void ValidateTensorShapesFromInputs() = 0;
287 
288  std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
289 
290  /// Helper to serialize the layer parameters to string.
291  /// (currently used in DotSerializer and company).
292  virtual void SerializeLayerParameters(ParameterStringifyFunction& fn) const;
293 
294  // Free up the constant source data
295  virtual void ReleaseConstantData();
296 
297  template<typename Op>
299  {
300  for (auto constant : GetConstantTensorsByRef())
301  {
302  if (constant.get())
303  {
304  op(constant);
305  }
306  }
307  };
308 
309  // IConnectableLayer
310 
311  const char* GetName() const override { return m_LayerName.c_str(); }
312 
313  unsigned int GetNumInputSlots() const override { return static_cast<unsigned int>(m_InputSlots.size()); }
314  unsigned int GetNumOutputSlots() const override { return static_cast<unsigned int>(m_OutputSlots.size()); }
315 
316  const InputSlot& GetInputSlot(unsigned int index) const override { return m_InputSlots.at(index); }
317  InputSlot& GetInputSlot(unsigned int index) override { return m_InputSlots.at(index); }
318  const OutputSlot& GetOutputSlot(unsigned int index = 0) const override { return m_OutputSlots.at(index); }
319  OutputSlot& GetOutputSlot(unsigned int index = 0) override { return m_OutputSlots.at(index); }
320 
321  void SetGuid(LayerGuid guid) { m_Guid = guid; }
322  LayerGuid GetGuid() const final { return m_Guid; }
323 
324  void AddRelatedLayerName(const std::string layerName) { m_RelatedLayerNames.emplace_back(layerName); }
325 
326  const std::list<std::string>& GetRelatedLayerNames() { return m_RelatedLayerNames; }
327 
328  virtual void Reparent(Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
329 
331  {
332  m_BackendHint = backend;
333  }
334  Optional<BackendId> GetBackendHint() const { return m_BackendHint; }
335 
337  {
338  m_ShapeInferenceMethod = shapeInferenceMethod;
339  }
340 
341  template<typename T>
342  std::shared_ptr<T> GetAdditionalInformation() const
343  {
344  return std::static_pointer_cast<T>(m_AdditionalInfoObject);
345  }
346 
348  {
349  m_AdditionalInfoObject = additionalInfo;
350  }
351 
352 protected:
353  // Graph needs access to the virtual destructor.
354  friend class Graph;
355  virtual ~Layer() = default;
356 
357  template <typename QueueDescriptor>
359  {
360  WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
361  CollectWorkloadInputs(dataCollector);
362  }
363 
364  template <typename QueueDescriptor>
366  {
367  WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
368  CollectWorkloadOutputs(dataCollector);
369  }
370 
371  void ValidateAndCopyShape(const TensorShape& outputShape,
372  const TensorShape& inferredShape,
373  const ShapeInferenceMethod shapeInferenceMethod,
374  const std::string& layerName,
375  const unsigned int outputSlotIndex = 0);
376 
377  void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod);
378 
379  /// Helper function to reduce duplication in *Layer::CreateWorkload.
380  template <typename QueueDescriptor>
382  {
384  CollectQueueDescriptorInputs(descriptor, info);
385  CollectQueueDescriptorOutputs(descriptor, info);
386  return info;
387  }
388 
389  template <typename LayerType, typename ... Params>
390  LayerType* CloneBase(Graph& graph, Params&& ... params) const;
391 
392  // Retrieve the Handles to the constants
393  using ConstantTensors = std::vector<std::reference_wrapper<std::unique_ptr<ScopedCpuTensorHandle>>>;
394  virtual ConstantTensors GetConstantTensorsByRef() {return ConstantTensors(); };
395 
396  // "Blob"
398 
399  // Utility method to set a pointer in the queueDescriptor to the "blob" location in the layer
400  void SetAdditionalInfo(QueueDescriptor& descriptor) const;
401 
402 private:
403  void CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const;
404  void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const;
405 
406 protected:
407  std::vector<OutputHandler> m_OutputHandlers;
409 
410 private:
411  const std::string m_LayerName;
412 
413  std::vector<InputSlot> m_InputSlots;
414  std::vector<OutputSlot> m_OutputSlots;
415 
416  const LayerType m_Type;
417  BackendId m_BackendId;
418  Optional<BackendId> m_BackendHint;
419 
420  /// Used for sorting.
421  mutable LayerPriority m_Priority = 0;
422  mutable bool m_Visiting = false;
423 
424  LayerGuid m_Guid;
425 
426  std::list<std::string> m_RelatedLayerNames;
427 
428 };
429 
430 // A layer user-provided data can be bound to (e.g. inputs, outputs).
431 class BindableLayer : public Layer
432 {
433 public:
434  BindableLayer(unsigned int numInputSlots,
435  unsigned int numOutputSlots,
436  LayerType type,
437  const char* name,
438  LayerBindingId id)
439  : Layer(numInputSlots, numOutputSlots, type, name)
440  , m_Id(id)
441  {
442  }
443 
444  LayerBindingId GetBindingId() const { return m_Id; };
445 
446  void ExecuteStrategy(IStrategy& strategy) const override
447  {
448  strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName(), GetBindingId());
449  }
450 
451 protected:
452  ~BindableLayer() = default;
453 
454 private:
455  LayerBindingId m_Id;
456 };
457 
458 }
std::vector< InputSlot >::iterator EndInputSlots()
Definition: Layer.hpp:242
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
Definition: Layer.hpp:358
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:125
void Insert(Layer &layer)
Definition: Layer.cpp:20
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
DataLayout
Definition: Types.hpp:50
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:313
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
Optional< BackendId > GetBackendHint() const
Definition: Layer.hpp:334
void AddRelatedLayerName(const std::string layerName)
Definition: Layer.hpp:324
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
Definition: Layer.hpp:330
void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.hpp:336
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:126
LayerBindingId GetBindingId() const
Definition: Layer.hpp:444
void OperateOnConstantTensors(Op op)
Definition: Layer.hpp:298
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: Layer.hpp:446
OutputSlot & GetOutputSlot(unsigned int index=0) override
Get the output slot handle by slot index.
Definition: Layer.hpp:319
Layer & GetOwningLayer() const
Definition: Layer.hpp:115
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
ShapeInferenceMethod GetShapeInferenceMethod() const
Definition: Layer.hpp:235
unsigned int LayerPriority
Definition: Layer.hpp:207
void Disconnect(IInputSlot &slot) override
Definition: Layer.hpp:151
int Connect(IInputSlot &destination) override
Definition: Layer.hpp:146
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
Definition: Layer.hpp:270
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:237
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:199
bool IsOutputUnconnected()
Definition: Layer.hpp:249
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:314
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Definition: Types.hpp:210
Base class for all descriptors.
Definition: Descriptors.hpp:22
std::vector< InputSlot >::iterator BeginInputSlots()
Definition: Layer.hpp:241
std::shared_ptr< void > AdditionalInfoObjectPtr
Definition: Layer.hpp:208
unsigned int GetNumConnections() const override
Definition: Layer.hpp:138
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
std::vector< TensorInfo > m_InputTensorInfos
bool operator==(const armnn::DataLayout &dataLayout, const DataLayoutIndexed &indexed)
Equality methods.
DataType
Definition: Types.hpp:32
void SetGuid(LayerGuid guid)
Definition: Layer.hpp:321
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition: Layer.hpp:394
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
Definition: Layer.hpp:381
An output connection slot for a layer.
Definition: INetwork.hpp:38
InputSlot(Layer &owner, unsigned int slotIndex)
Definition: Layer.hpp:44
unsigned int GetSlotIndex() const
Definition: Layer.hpp:53
const std::string & GetNameStr() const
Definition: Layer.hpp:220
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:265
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:55
std::vector< TensorInfo > m_OutputTensorInfos
InputSlot & GetInputSlot(unsigned int index) override
Get the input slot handle by slot index.
Definition: Layer.hpp:317
Layer & GetOwningLayer() const
Definition: Layer.hpp:52
OutputSlot * GetConnectedOutputSlot()
Definition: Layer.hpp:56
const BackendId & GetBackendId() const
Definition: Layer.hpp:269
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:407
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:238
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name, LayerBindingId id)
Definition: Layer.hpp:434
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
Definition: Layer.hpp:347
const std::list< std::string > & GetRelatedLayerNames()
Definition: Layer.hpp:326
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:245
std::vector< ITensorHandle * > m_Outputs
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
Definition: Layer.hpp:365
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition: Layer.hpp:59
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:225
std::vector< OutputSlot >::iterator EndOutputSlots()
Definition: Layer.hpp:246
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
OutputHandler & GetOutputHandler()
Definition: Layer.hpp:120
OutputSlot(Layer &owner, OutputHandler &outputHandler)
Definition: Layer.hpp:86
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:119
Contains information about inputs and outputs to a layer.
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:311
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
std::vector< ITensorHandle * > m_Inputs
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
std::vector< std::reference_wrapper< std::unique_ptr< ScopedCpuTensorHandle > >> ConstantTensors
Definition: Layer.hpp:393
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:177
An input connection slot for a layer.
Definition: INetwork.hpp:25
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:408
OutputHandler & GetOutputHandler(unsigned int i=0)
Definition: Layer.hpp:230
std::shared_ptr< T > GetAdditionalInformation() const
Definition: Layer.hpp:342
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:419
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:322