ArmNN
 22.02
Layer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Layer.hpp"
6 
7 #include "Graph.hpp"
8 #include <ProfilingService.hpp>
12 
13 #include <fmt/format.h>
14 
15 #include <numeric>
16 
17 namespace armnn
18 {
19 
20 // Instantiate the static member variable
21 NullDescriptor Layer::m_NullDescriptor;
22 
24 {
25  ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
26 
27  OutputSlot* const prevSlot = GetConnectedOutputSlot();
28 
29  if (prevSlot != nullptr)
30  {
31  // Disconnects parent from this.
32  prevSlot->Disconnect(*this);
33 
34  // Connects inserted layer to parent.
35  ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
36  int idx = prevSlot->Connect(layer.GetInputSlot(0));
37  prevSlot->SetEdgeStrategy(armnn::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);
38 
39  // Sets tensor info for inserted layer.
40  const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
41  layer.GetOutputHandler().SetTensorInfo(tensorInfo);
42  }
43 
44  // Connects inserted layer to this.
45  layer.GetOutputSlot(0).Connect(*this);
47 }
48 
49 const InputSlot* OutputSlot::GetConnection(unsigned int index) const
50 {
51  ValidateConnectionIndex(index);
52  return m_Connections[index];
53 }
54 
56 {
57  ValidateConnectionIndex(index);
58  return m_Connections[index];
59 }
60 
61 void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
62 {
63  GetOutputHandler().SetTensorInfo(tensorInfo);
64 }
65 
67 {
68  return GetOutputHandler().GetTensorInfo();
69 }
70 
// bool OutputSlot::IsTensorInfoSet() const — the signature line (original
// listing line 71) is missing from this dump; see the cross-reference index
// entry "Definition: Layer.cpp:71". Returns whether the OutputHandler has a
// TensorInfo assigned. NOTE(review): listing line 75 (the body of the
// InferAndValidate branch) is also missing; presumably it triggers shape
// inference/validation on the owning layer — confirm against the upstream file.
72 {
73  if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
74  {
76  }
77  return GetOutputHandler().IsTensorInfoSet();
78 }
79 
81 {
82  ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
83  return shape == m_OutputHandler.GetTensorInfo().GetShape();
84 }
85 
86 int OutputSlot::Connect(InputSlot& destination)
87 {
88  destination.SetConnection(this);
89  m_Connections.push_back(&destination);
90  m_EdgeStrategies.push_back(EdgeStrategy::Undefined);
91  return armnn::numeric_cast<int>(m_Connections.size() - 1);
92 }
93 
95 {
96  slot.SetConnection(nullptr);
97  auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
98 
99  if (it == m_Connections.end())
100  {
101  return;
102  }
103 
104  auto idx = std::distance(m_Connections.begin(), it);
105  m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
106 
107  m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
108 }
109 
111 {
112  while (GetNumConnections() > 0)
113  {
114  InputSlot& connection = *GetConnection(0);
115  Disconnect(connection);
116  }
117 }
118 
120 {
121  while (GetNumConnections() > 0)
122  {
123  ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
124  "Cannot move connections once memory strategies have be established.");
125 
126  InputSlot& connection = *GetConnection(0);
127  Disconnect(connection);
128  destination.Connect(connection);
129  destination.GetOutputHandler().SetTensorInfo(GetOutputHandler().GetTensorInfo());
130  }
131 }
132 
134 {
135  for (unsigned int i = 0; i < GetOwningLayer().GetNumOutputSlots(); i++)
136  {
137  if (GetOwningLayer().GetOutputSlot(i) == (*this))
138  {
139  return i;
140  }
141  }
142  ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
143  return 0; // Error
144 }
145 
146 bool OutputSlot::operator==(const OutputSlot& other) const
147 {
148  bool isSame = other.GetNumConnections() == GetNumConnections();
149  if (!isSame)
150  {
151  return false;
152  }
153 
154  for (unsigned int i = 0; i < GetNumConnections(); i++)
155  {
156  isSame &= other.GetConnection(i) == GetConnection(i);
157  }
158  return isSame;
159 }
160 
161 void OutputSlot::ValidateConnectionIndex(unsigned int index) const
162 {
163  if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
164  {
165  throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
166  }
167 }
168 
170 {
171  return GetOwningLayer().GetGuid();
172 }
173 
175 {
176  m_TensorHandleFactoryId = id;
177 }
178 
180 {
181  return m_TensorHandleFactoryId;
182 }
183 
184 void OutputSlot::SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
185 {
186  m_EdgeStrategies[connectionIndex] = strategy;
187 }
188 
190 {
191  return m_EdgeStrategies[connectionIdx];
192 }
193 
194 Layer::Layer(unsigned int numInputSlots,
195  unsigned int numOutputSlots,
196  LayerType type,
197  DataLayout layout,
198  const char* name)
199 : m_OutputHandlers(numOutputSlots)
200 , m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
201 , m_LayerName(name ? name : "")
202 , m_Type(type)
203 , m_BackendId()
204 , m_BackendHint(EmptyOptional())
205 , m_Guid(profiling::ProfilingService::GetNextGuid())
206 {
207  IgnoreUnused(layout);
208  m_InputSlots.reserve(numInputSlots);
209  for (unsigned int i = 0; i < numInputSlots; ++i)
210  {
211  m_InputSlots.emplace_back(*this, i);
212  }
213 
214  m_OutputSlots.reserve(numOutputSlots);
215  for (unsigned int i = 0; i < numOutputSlots; ++i)
216  {
217  m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
218  }
219 }
220 
221 Layer::Layer(unsigned int numInputSlots,
222  unsigned int numOutputSlots,
223  LayerType type,
224  const char* name)
225 : Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
226 {
227 }
228 
229 void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
230 {
231  for (auto&& inputSlot : GetInputSlots())
232  {
233  // The graph must be well-formed at this point.
234  ARMNN_ASSERT(inputSlot.GetConnection());
235  const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
236  dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
237  }
238 }
239 
240 void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
241 {
242  for (auto&& outputHandler : m_OutputHandlers)
243  {
244  outputHandler.CollectWorkloadOutputs(dataCollector);
245  }
246 }
247 
// void Layer::SetAdditionalInfo(QueueDescriptor& descriptor) const — per the
// cross-reference index ("Definition: Layer.cpp:248"). The signature line and
// the single body line (original listing line 250) are missing from this dump.
// NOTE(review): presumably the body copies m_AdditionalInfoObject into the
// descriptor — confirm against the upstream file before relying on this text.
249 {
251 }
252 
// void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
//                                 const IWorkloadFactory& workloadFactory,
//                                 const bool IsMemoryManaged)
// — the first signature line (original listing line 253) is missing from this
// dump; see the cross-reference index entry "Definition: Layer.cpp:253".
// Creates the backing tensor handles for every output slot: the legacy
// workload-factory path when the factory id is LegacyFactoryId, otherwise the
// factory looked up in the registry (which must exist).
254  const IWorkloadFactory& workloadFactory,
255  const bool IsMemoryManaged)
256 {
257  for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
258  {
259 
// NOTE(review): listing line 261 is missing here; it must declare `factoryId`,
// presumably `ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();`
// — confirm against the upstream file.
260  OutputSlot& slot = GetOutputSlot(idx);
262 
263  OutputHandler& handler = GetOutputHandler(idx);
264  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
265  {
266  handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
267  }
268  else
269  {
270  ITensorHandleFactory* handleFactory;
271  handleFactory = registry.GetFactory(factoryId);
272  ARMNN_ASSERT(handleFactory);
273  handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
274  }
275  }
276 }
277 
279 {
280  // Now free up the static data.
281  OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
282  {
283  handle.reset();
284  });
285 }
286 
// DataType Layer::GetDataType() const — the signature line (original listing
// line 287) and both return statements (listing lines 291 and 293) are missing
// from this dump; see the cross-reference index entry "Definition: Layer.cpp:287".
// NOTE(review): presumably the function returns the data type of the first
// input's tensor info when the layer has inputs, and of the first output
// otherwise — confirm against the upstream file.
288 {
289  if (GetNumInputSlots() > 0) // Ignore the input layer.
290  {
292  }
294 }
295 
297 {
298  m_Priority = 0;
299  m_Visiting = false;
300 }
301 
303 {
304  constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
305  constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
306 
307  if (GetType() == LayerType::Input)
308  {
309  m_Priority = inputPrio;
310  }
311  else if (GetType() == LayerType::Output)
312  {
313  m_Priority = outputPrio;
314  }
315  else if (m_Priority == 0)
316  {
317  if (m_Visiting)
318  {
319  throw GraphValidationException("Graph has circular dependencies: cannot walk");
320  }
321 
322  auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
323  {
324  const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
325  if (outputSlot)
326  {
327  const Layer& input = outputSlot->GetOwningLayer();
328  return std::max(prio, input.GetPriority());
329  }
330  else
331  {
332  // unconnected input slot
333  return prio;
334  }
335  };
336 
337  m_Visiting = true;
338  LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
339  m_Visiting = false;
340 
341  if (parentPrio >= outputPrio)
342  {
343  throw GraphValidationException("Graph has too many edges");
344  }
345 
346  m_Priority = parentPrio + 1U;
347  }
348 
349  return m_Priority;
350 }
351 
// Asserts the layer has exactly `expectedConnections` input slots and raises an
// exception when any of them is left unconnected, embedding the call site
// (`location`) in the message.
352 void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
353 {
354  ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
355 
356  for (unsigned int i=0; i<expectedConnections; ++i)
357  {
358  if (GetInputSlot(i).GetConnection() == nullptr)
359  {
// NOTE(review): original listing line 360 (the `throw <ExceptionType>(` that
// consumes this fmt::format expression) and line 364 (the argument filling
// placeholder {1}, presumably GetLayerTypeAsCString(GetType())) are missing from
// this dump — confirm against the upstream file.
361  fmt::format("Input connection #{0} must be connected "
362  "for {1} layer {2} {3}",
363  i,
365  GetNameStr(),
366  location.AsString()));
367  }
368  }
369 }
370 
// Base-class default: echoes the input shapes back as the output shapes. Only
// valid when the layer has the same number of inputs and outputs; otherwise an
// exception is raised, forcing subclasses to override.
371 std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
372 {
// NOTE(review): original listing lines 373-374, 381, 383, 387, 389 and 390 are
// missing from this dump. They presumably hold the
// `if (GetNumInputSlots() != GetNumOutputSlots())` guard, the
// `throw UnimplementedException(` line, and the arguments filling placeholders
// {0}, {2} and {3} (layer type and slot counts) — confirm against the upstream
// file.
375 
376  // By default we return what we got, meaning the output shape(s) are the same as the input(s).
377  // This only works if the number of inputs and outputs are the same. Since we are in the Layer
378  // base class, this means the implementation needs to be overridden in the specific layers for
379  // the other cases. So the missing implementation justifies the UnimplementedException.
380 
382  {
384  fmt::format("Default implementation for InferOutputShapes can only be used for "
385  "layers with the same number of input and output slots. This doesn't "
386  "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
388  GetNameStr(),
391  CHECK_LOCATION().AsString()));
392  }
393  return inputShapes;
394 }
395 
396 void Layer::ValidateAndCopyShape(const TensorShape& outputShape,
397  const TensorShape& inferredShape,
398  const ShapeInferenceMethod shapeInferenceMethod,
399  const std::string& layerName,
400  const unsigned int outputSlotIndex)
401 {
402  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
403  {
404  ConditionalThrowIfNotEqual<LayerValidationException>(
405  layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
406  outputShape,
407  inferredShape);
408  return;
409  }
410 
411  if (outputShape.GetDimensionality() == Dimensionality::Specified)
412  {
413  for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
414  {
415  if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
416  {
417  std::stringstream ss;
418  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
419  "] does not match the inferred shape at dimension index [";
420  ss << i << "] " << outputShape << " != " << inferredShape;
421  throw LayerValidationException(ss.str());
422  }
423  }
424  }
425 
426  TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
427 
428  armnn::TensorInfo inferredTensorInfo(inferredShape,
429  info.GetDataType(),
430  info.GetQuantizationScale(),
431  info.GetQuantizationOffset());
432 
433  GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
434 }
435 
// Under ValidateOnly shape inference the caller-supplied shape must be fully
// specified; both checks throw LayerValidationException on failure.
436 void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod)
437 {
438  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
439  {
440  ConditionalThrow<LayerValidationException>(
// NOTE(review): original listing line 441 — the boolean condition for this
// first check, presumably
// `outputShape.GetDimensionality() != Dimensionality::NotSpecified,` — is
// missing from this dump; confirm against the upstream file.
442  "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
443 
444  ConditionalThrow<LayerValidationException>(
445  outputShape.AreAllDimensionsSpecified(),
446  "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
447  }
448 }
449 
451 {
452  std::string guid = std::to_string(m_Guid);
453  std::string layerType = GetLayerTypeAsCString(m_Type);
454  std::string backendId = std::string(m_BackendId);
455  if (!(guid.compare("") == 0) && !guid.empty())
456  {
457  fn("Guid", guid);
458  }
459  if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
460  {
461  fn("LayerName",m_LayerName);
462  }
463  if(!(layerType.compare("") == 0) && !layerType.empty())
464  {
465  fn("LayerType",layerType);
466  }
467  if(!(backendId.compare("") == 0) && !backendId.empty())
468  {
469  fn("BackendID",backendId);
470  }
471  std::shared_ptr<ActivationDescriptor>
472  activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
473 
474  if (activationDescPtr)
475  {
476  StringifyLayerParameters<ActivationDescriptor>::Serialize(fn, *activationDescPtr.get());
477  }
478 }
479 
480 // default implementation of ExecuteStrategy
481 void Layer::ExecuteStrategy(IStrategy& strategy) const
482 {
483  strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
484 }
485 
487 {
488  return m_OwningLayer;
489 }
490 
492 {
493  return m_OwningLayer;
494 }
495 
496 } // namespace armnn
void DisconnectAll()
Definition: Layer.cpp:110
virtual void ReleaseConstantData()
Definition: Layer.cpp:278
bool ValidateTensorShape(const TensorShape &shape) const
Definition: Layer.cpp:80
void Insert(Layer &layer)
Definition: Layer.cpp:23
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:184
DataLayout
Definition: Types.hpp:49
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:318
std::string AsString() const
Definition: Exceptions.hpp:29
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition: Layer.cpp:371
bool AreAllDimensionsSpecified() const
Checks if there is at least one dimension not specified.
Definition: Tensor.cpp:241
LayerGuid GetOwningLayerGuid() const override
Definition: Layer.cpp:169
void OperateOnConstantTensors(Op op)
Definition: Layer.hpp:303
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
Layer & GetOwningLayer() const
Definition: Layer.hpp:118
int Connect(InputSlot &destination)
Definition: Layer.cpp:86
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
unsigned int LayerPriority
Definition: Layer.hpp:212
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition: Layer.cpp:189
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:436
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:242
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:204
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:319
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:396
void Disconnect(InputSlot &slot)
Definition: Layer.cpp:94
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:486
Base class for all descriptors.
Definition: Descriptors.hpp:22
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:491
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:352
unsigned int GetNumConnections() const override
Definition: Layer.hpp:143
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:321
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: Layer.cpp:481
DataType
Definition: Types.hpp:35
void ResetPriority() const
Definition: Layer.cpp:296
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition: Layer.hpp:401
DataType GetDataType() const
Definition: Tensor.hpp:198
void Push(ITensorHandle *handle, const TensorInfo &info)
Validate all output shapes.
const std::string & GetNameStr() const
Definition: Layer.hpp:225
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:270
LayerPriority GetPriority() const
Definition: Layer.cpp:302
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:221
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
virtual void ValidateTensorShapesFromInputs()=0
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
#define CHECK_LOCATION()
Definition: Exceptions.hpp:209
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:414
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:248
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:174
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
bool operator==(const OutputSlot &other) const
Definition: Layer.cpp:146
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition: Layer.hpp:62
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:230
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:363
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:61
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
Definition: Layer.cpp:450
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
DataType GetDataType() const
Definition: Layer.cpp:287
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:323
Infer missing output shapes and validate all output shapes.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:124
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:316
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:179
bool IsTensorInfoSet() const override
Definition: Layer.cpp:71
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition: Layer.cpp:253
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:66
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:208
const char * GetLayerTypeAsCString(LayerType type)
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition: Layer.cpp:119
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:49
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:458
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:327
unsigned int CalculateIndexOnOwner() const override
Definition: Layer.cpp:133