ArmNN
 21.11
Layer.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Layer.hpp"
6 
7 #include "Graph.hpp"
8 #include <ProfilingService.hpp>
12 
13 #include <fmt/format.h>
14 
15 #include <numeric>
16 
17 namespace armnn
18 {
19 
21 {
22  ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
23 
24  OutputSlot* const prevSlot = GetConnectedOutputSlot();
25 
26  if (prevSlot != nullptr)
27  {
28  // Disconnects parent from this.
29  prevSlot->Disconnect(*this);
30 
31  // Connects inserted layer to parent.
32  ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
33  int idx = prevSlot->Connect(layer.GetInputSlot(0));
34  prevSlot->SetEdgeStrategy(armnn::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);
35 
36  // Sets tensor info for inserted layer.
37  const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
38  layer.GetOutputHandler().SetTensorInfo(tensorInfo);
39  }
40 
41  // Connects inserted layer to this.
42  layer.GetOutputSlot(0).Connect(*this);
44 }
45 
46 const InputSlot* OutputSlot::GetConnection(unsigned int index) const
47 {
48  ValidateConnectionIndex(index);
49  return m_Connections[index];
50 }
51 
53 {
54  ValidateConnectionIndex(index);
55  return m_Connections[index];
56 }
57 
58 void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
59 {
60  GetOutputHandler().SetTensorInfo(tensorInfo);
61 }
62 
64 {
65  return GetOutputHandler().GetTensorInfo();
66 }
67 
69 {
70  if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
71  {
73  }
74  return GetOutputHandler().IsTensorInfoSet();
75 }
76 
78 {
79  ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
80  return shape == m_OutputHandler.GetTensorInfo().GetShape();
81 }
82 
83 int OutputSlot::Connect(InputSlot& destination)
84 {
85  destination.SetConnection(this);
86  m_Connections.push_back(&destination);
87  m_EdgeStrategies.push_back(EdgeStrategy::Undefined);
88  return armnn::numeric_cast<int>(m_Connections.size() - 1);
89 }
90 
92 {
93  slot.SetConnection(nullptr);
94  auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
95 
96  if (it == m_Connections.end())
97  {
98  return;
99  }
100 
101  auto idx = std::distance(m_Connections.begin(), it);
102  m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
103 
104  m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
105 }
106 
108 {
109  while (GetNumConnections() > 0)
110  {
111  InputSlot& connection = *GetConnection(0);
112  Disconnect(connection);
113  }
114 }
115 
117 {
118  while (GetNumConnections() > 0)
119  {
120  ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
121  "Cannot move connections once memory strategies have be established.");
122 
123  InputSlot& connection = *GetConnection(0);
124  Disconnect(connection);
125  destination.Connect(connection);
126  destination.GetOutputHandler().SetTensorInfo(GetOutputHandler().GetTensorInfo());
127  }
128 }
129 
131 {
132  for (unsigned int i = 0; i < GetOwningLayer().GetNumOutputSlots(); i++)
133  {
134  if (GetOwningLayer().GetOutputSlot(i) == (*this))
135  {
136  return i;
137  }
138  }
139  ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
140  return 0; // Error
141 }
142 
143 bool OutputSlot::operator==(const OutputSlot& other) const
144 {
145  bool isSame = other.GetNumConnections() == GetNumConnections();
146  if (!isSame)
147  {
148  return false;
149  }
150 
151  for (unsigned int i = 0; i < GetNumConnections(); i++)
152  {
153  isSame &= other.GetConnection(i) == GetConnection(i);
154  }
155  return isSame;
156 }
157 
158 void OutputSlot::ValidateConnectionIndex(unsigned int index) const
159 {
160  if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
161  {
162  throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
163  }
164 }
165 
167 {
168  return GetOwningLayer().GetGuid();
169 }
170 
172 {
173  m_TensorHandleFactoryId = id;
174 }
175 
177 {
178  return m_TensorHandleFactoryId;
179 }
180 
181 void OutputSlot::SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
182 {
183  m_EdgeStrategies[connectionIndex] = strategy;
184 }
185 
187 {
188  return m_EdgeStrategies[connectionIdx];
189 }
190 
191 Layer::Layer(unsigned int numInputSlots,
192  unsigned int numOutputSlots,
193  LayerType type,
194  DataLayout layout,
195  const char* name)
196 : m_OutputHandlers(numOutputSlots)
197 , m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
198 , m_LayerName(name ? name : "")
199 , m_Type(type)
200 , m_BackendId()
201 , m_BackendHint(EmptyOptional())
202 , m_Guid(profiling::ProfilingService::GetNextGuid())
203 {
204  IgnoreUnused(layout);
205  m_InputSlots.reserve(numInputSlots);
206  for (unsigned int i = 0; i < numInputSlots; ++i)
207  {
208  m_InputSlots.emplace_back(*this, i);
209  }
210 
211  m_OutputSlots.reserve(numOutputSlots);
212  for (unsigned int i = 0; i < numOutputSlots; ++i)
213  {
214  m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
215  }
216 }
217 
218 Layer::Layer(unsigned int numInputSlots,
219  unsigned int numOutputSlots,
220  LayerType type,
221  const char* name)
222 : Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
223 {
224 }
225 
226 void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
227 {
228  for (auto&& inputSlot : GetInputSlots())
229  {
230  // The graph must be well-formed at this point.
231  ARMNN_ASSERT(inputSlot.GetConnection());
232  const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
233  dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
234  }
235 }
236 
237 void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
238 {
239  for (auto&& outputHandler : m_OutputHandlers)
240  {
241  outputHandler.CollectWorkloadOutputs(dataCollector);
242  }
243 }
244 
246 {
248 }
249 
251  const IWorkloadFactory& workloadFactory,
252  const bool IsMemoryManaged)
253 {
254  for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
255  {
256 
257  OutputSlot& slot = GetOutputSlot(idx);
259 
260  OutputHandler& handler = GetOutputHandler(idx);
261  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
262  {
263  handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
264  }
265  else
266  {
267  ITensorHandleFactory* handleFactory;
268  handleFactory = registry.GetFactory(factoryId);
269  ARMNN_ASSERT(handleFactory);
270  handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
271  }
272  }
273 }
274 
276 {
277  // Now free up the static data.
278  OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
279  {
280  handle.reset();
281  });
282 }
283 
285 {
286  if (GetNumInputSlots() > 0) // Ignore the input layer.
287  {
289  }
291 }
292 
294 {
295  m_Priority = 0;
296  m_Visiting = false;
297 }
298 
300 {
301  constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
302  constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
303 
304  if (GetType() == LayerType::Input)
305  {
306  m_Priority = inputPrio;
307  }
308  else if (GetType() == LayerType::Output)
309  {
310  m_Priority = outputPrio;
311  }
312  else if (m_Priority == 0)
313  {
314  if (m_Visiting)
315  {
316  throw GraphValidationException("Graph has circular dependencies: cannot walk");
317  }
318 
319  auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
320  {
321  const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
322  if (outputSlot)
323  {
324  const Layer& input = outputSlot->GetOwningLayer();
325  return std::max(prio, input.GetPriority());
326  }
327  else
328  {
329  // unconnected input slot
330  return prio;
331  }
332  };
333 
334  m_Visiting = true;
335  LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
336  m_Visiting = false;
337 
338  if (parentPrio >= outputPrio)
339  {
340  throw GraphValidationException("Graph has too many edges");
341  }
342 
343  m_Priority = parentPrio + 1U;
344  }
345 
346  return m_Priority;
347 }
348 
349 void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
350 {
351  ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
352 
353  for (unsigned int i=0; i<expectedConnections; ++i)
354  {
355  if (GetInputSlot(i).GetConnection() == nullptr)
356  {
358  fmt::format("Input connection #{0} must be connected "
359  "for {1} layer {2} {3}",
360  i,
362  GetNameStr(),
363  location.AsString()));
364  }
365  }
366 }
367 
368 std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
369 {
372 
373  // By default we return what we got, meaning the output shape(s) are the same as the input(s).
374  // This only works if the number of inputs and outputs are the same. Since we are in the Layer
375  // base class, this means the implementation needs to be overridden in the specific layers for
376  // the other cases. So the missing implementation justifies the UnimplementedException.
377 
379  {
381  fmt::format("Default implementation for InferOutputShapes can only be used for "
382  "layers with the same number of input and output slots. This doesn't "
383  "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
385  GetNameStr(),
388  CHECK_LOCATION().AsString()));
389  }
390  return inputShapes;
391 }
392 
393 void Layer::ValidateAndCopyShape(const TensorShape& outputShape,
394  const TensorShape& inferredShape,
395  const ShapeInferenceMethod shapeInferenceMethod,
396  const std::string& layerName,
397  const unsigned int outputSlotIndex)
398 {
399  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
400  {
401  ConditionalThrowIfNotEqual<LayerValidationException>(
402  layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
403  outputShape,
404  inferredShape);
405  return;
406  }
407 
408  if (outputShape.GetDimensionality() == Dimensionality::Specified)
409  {
410  for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
411  {
412  if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
413  {
414  std::stringstream ss;
415  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
416  "] does not match the inferred shape at dimension index [";
417  ss << i << "] " << outputShape << " != " << inferredShape;
418  throw LayerValidationException(ss.str());
419  }
420  }
421  }
422 
423  TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
424 
425  armnn::TensorInfo inferredTensorInfo(inferredShape,
426  info.GetDataType(),
427  info.GetQuantizationScale(),
428  info.GetQuantizationOffset());
429 
430  GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
431 }
432 
433 void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod)
434 {
435  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
436  {
437  ConditionalThrow<LayerValidationException>(
439  "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
440 
441  ConditionalThrow<LayerValidationException>(
442  outputShape.AreAllDimensionsSpecified(),
443  "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
444  }
445 }
446 
448 {
449  std::string guid = std::to_string(m_Guid);
450  std::string layerType = GetLayerTypeAsCString(m_Type);
451  std::string backendId = std::string(m_BackendId);
452  if (!(guid.compare("") == 0) && !guid.empty())
453  {
454  fn("Guid", guid);
455  }
456  if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
457  {
458  fn("LayerName",m_LayerName);
459  }
460  if(!(layerType.compare("") == 0) && !layerType.empty())
461  {
462  fn("LayerType",layerType);
463  }
464  if(!(backendId.compare("") == 0) && !backendId.empty())
465  {
466  fn("BackendID",backendId);
467  }
468  std::shared_ptr<ActivationDescriptor>
469  activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
470 
471  if (activationDescPtr)
472  {
473  StringifyLayerParameters<ActivationDescriptor>::Serialize(fn, *activationDescPtr.get());
474  }
475 }
476 
477 // default implementation of ExecuteStrategy
478 void Layer::ExecuteStrategy(IStrategy& strategy) const
479 {
480  strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
481 }
482 
483 } // namespace armnn
void DisconnectAll()
Definition: Layer.cpp:107
virtual void ReleaseConstantData()
Definition: Layer.cpp:275
bool ValidateTensorShape(const TensorShape &shape) const
Definition: Layer.cpp:77
void Insert(Layer &layer)
Definition: Layer.cpp:20
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:181
DataLayout
Definition: Types.hpp:49
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:313
std::string AsString() const
Definition: Exceptions.hpp:29
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition: Layer.cpp:368
bool AreAllDimensionsSpecified() const
Returns whether all dimensions are specified (false if at least one dimension is not specified).
Definition: Tensor.cpp:241
LayerGuid GetOwningLayerGuid() const override
Definition: Layer.cpp:166
void OperateOnConstantTensors(Op op)
Definition: Layer.hpp:298
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
Layer & GetOwningLayer() const
Definition: Layer.hpp:115
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
unsigned int LayerPriority
Definition: Layer.hpp:207
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition: Layer.cpp:186
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:433
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:237
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:199
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:314
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:393
void Disconnect(InputSlot &slot)
Definition: Layer.cpp:91
Base class for all descriptors.
Definition: Descriptors.hpp:22
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:349
unsigned int GetNumConnections() const override
Definition: Layer.hpp:138
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: Layer.cpp:478
DataType
Definition: Types.hpp:35
void ResetPriority() const
Definition: Layer.cpp:293
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition: Layer.hpp:394
DataType GetDataType() const
Definition: Tensor.hpp:198
void Push(ITensorHandle *handle, const TensorInfo &info)
Validate all output shapes.
const std::string & GetNameStr() const
Definition: Layer.hpp:220
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:265
LayerPriority GetPriority() const
Definition: Layer.cpp:299
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:218
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:55
virtual void ValidateTensorShapesFromInputs()=0
Layer & GetOwningLayer() const
Definition: Layer.hpp:52
#define CHECK_LOCATION()
Definition: Exceptions.hpp:209
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:407
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:245
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:171
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
bool operator==(const OutputSlot &other) const
Definition: Layer.cpp:143
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition: Layer.hpp:59
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:225
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:349
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
Definition: Layer.cpp:447
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
DataType GetDataType() const
Definition: Layer.cpp:284
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
Infer missing output shapes and validate all output shapes.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:119
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:311
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:176
bool IsTensorInfoSet() const override
Definition: Layer.cpp:68
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition: Layer.cpp:250
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:208
const char * GetLayerTypeAsCString(LayerType type)
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition: Layer.cpp:116
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:46
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:443
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:322
unsigned int CalculateIndexOnOwner() const override
Definition: Layer.cpp:130