Arm NN 23.08 — Doxygen source listing of Layer.cpp (generated page; the original documentation link has been removed in this text export).
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Layer.hpp"
6 
7 #include "Graph.hpp"
8 
11 
13 
15 
16 #include <client/include/IProfilingService.hpp>
17 
18 #include <fmt/format.h>
19 
20 #include <numeric>
21 
22 namespace armnn
23 {
24 
25 // Instantiate the static member variable
26 NullDescriptor Layer::m_NullDescriptor;
27 
// Body of AssertNumberOfInputSlots(Layer& layer): sanity-checks the slot count for a layer.
// NOTE(review): the signature line and the `case` labels of this switch were lost in the
// extraction of this listing; per the symbol index the cases are Convolution2d /
// DepthwiseConvolution2d / FullyConnected — confirm against the upstream file.
{
    switch (layer.GetType())
    {
        // (missing case labels — see note above)
        {
            // Layers that may carry optional weight/bias inputs: 2 or 3 input slots.
            ARMNN_ASSERT(layer.GetNumInputSlots() == 2 ||
                         layer.GetNumInputSlots() == 3);
            break;
        }
        default:
        {
            // Every other layer type is expected to have exactly one input slot.
            ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
            break;
        }
    }
}
47 
// Body of InputSlot::Insert(Layer& layer): splices `layer` between this input slot and
// whatever OutputSlot currently feeds it, propagating the producer's TensorInfo.
// NOTE(review): the signature line and one statement after Disconnect were lost in
// extraction — verify against the upstream file before relying on this listing.
{
    // The inserted layer must expose exactly one output to re-wire through.
    ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);

    OutputSlot* const prevSlot = GetConnectedOutputSlot();

    if (prevSlot != nullptr)
    {
        // Disconnects parent from this.
        prevSlot->Disconnect(*this);

        // Connects inserted layer to parent.
        int idx = prevSlot->Connect(layer.GetInputSlot(0));
        // Newly created edge has no memory strategy decided yet.
        prevSlot->SetEdgeStrategy(armnn::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);

        // Sets tensor info for inserted layer.
        const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
        layer.GetOutputHandler().SetTensorInfo(tensorInfo);
    }

    // Connects inserted layer to this.
    layer.GetOutputSlot(0).Connect(*this);
}
74 
75 const InputSlot* OutputSlot::GetConnection(unsigned int index) const
76 {
77  ValidateConnectionIndex(index);
78  return m_Connections[index];
79 }
80 
// Presumably the non-const overload of OutputSlot::GetConnection(unsigned int)
// (the signature line was lost in extraction — confirm against upstream).
{
    ValidateConnectionIndex(index); // throws on an out-of-range index
    return m_Connections[index];
}
86 
/// Forwards the TensorInfo to this slot's OutputHandler, which stores the
/// tensor metadata for this output.
void OutputSlot::SetTensorInfo(const TensorInfo& tensorInfo)
{
    GetOutputHandler().SetTensorInfo(tensorInfo);
}
91 
93 {
95 }
96 
98 {
99  if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
100  {
102  }
104 }
105 
// Body of OutputSlot::ValidateTensorShape(const TensorShape&) (signature line lost in
// extraction): true iff `shape` equals the shape already stored in the output handler.
{
    ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
    return shape == m_OutputHandler.GetTensorInfo().GetShape();
}
111 
// Body of OutputSlot::Connect(InputSlot& destination) (signature line lost in extraction):
// registers `destination` as a new connection and returns its index in m_Connections.
{
    destination.SetConnection(this);
    m_Connections.push_back(&destination);
    // Every new connection starts with an undecided edge/memory strategy.
    m_EdgeStrategies.push_back(EdgeStrategy::Undefined);
    return armnn::numeric_cast<int>(m_Connections.size() - 1);
}
119 
// Body of OutputSlot::Disconnect(InputSlot& slot) (signature line lost in extraction):
// removes `slot` from this output's connection list and clears its back-reference.
{
    // NOTE(review): the back-reference is cleared even when `slot` is not found in
    // m_Connections below — confirm that is the intended contract.
    slot.SetConnection(nullptr);
    auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);

    if (it == m_Connections.end())
    {
        return;
    }

    auto idx = std::distance(m_Connections.begin(), it);
    // Erases every occurrence of &slot (erase-remove idiom) ...
    m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());

    // ... but only the strategy recorded at the first occurrence is dropped.
    // NOTE(review): if the same slot could appear more than once, the two vectors
    // would fall out of step here — confirm duplicates cannot occur.
    m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
}
135 
// Body of OutputSlot::DisconnectAll() (signature line lost in extraction):
// detaches every input slot currently connected to this output.
{
    while (GetNumConnections() > 0)
    {
        // Always take the head; Disconnect shrinks the list on each iteration.
        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
    }
}
144 
// Body of OutputSlot::MoveAllConnections(OutputSlot& destination) (signature line lost
// in extraction): re-homes every connection of this slot onto `destination`.
{
    while (GetNumConnections() > 0)
    {
        // Moving edges is only legal while their memory strategy is still undecided.
        ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
                         "Cannot move connections once memory strategies have be established.");

        InputSlot& connection = *GetConnection(0);
        Disconnect(connection);
        destination.Connect(connection);
    }
}
158 
// Body of OutputSlot::CalculateIndexOnOwner() (signature line lost in extraction):
// linear search for this slot's index among the owning layer's output slots.
{
    for (unsigned int i = 0; i < GetOwningLayer().GetNumOutputSlots(); i++)
    {
        // Identity is decided by operator== on the slots.
        if (GetOwningLayer().GetOutputSlot(i) == (*this))
        {
            return i;
        }
    }
    ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
    return 0; // Error
}
171 
172 bool OutputSlot::operator==(const OutputSlot& other) const
173 {
174  bool isSame = other.GetNumConnections() == GetNumConnections();
175  if (!isSame)
176  {
177  return false;
178  }
179 
180  for (unsigned int i = 0; i < GetNumConnections(); i++)
181  {
182  isSame &= other.GetConnection(i) == GetConnection(i);
183  }
184  return isSame;
185 }
186 
187 void OutputSlot::ValidateConnectionIndex(unsigned int index) const
188 {
189  if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
190  {
191  throw InvalidArgumentException((fmt::format("GetConnection: Invalid index {} provided", index)));
192  }
193 }
194 
196 {
197  return GetOwningLayer().GetGuid();
198 }
199 
201 {
202  m_TensorHandleFactoryId = id;
203 }
204 
206 {
207  return m_TensorHandleFactoryId;
208 }
209 
/// Records the edge strategy for the connection at @p connectionIndex.
/// NOTE(review): unlike GetConnection, this does not validate connectionIndex — an
/// out-of-range index indexes the vector out of bounds; confirm callers guarantee validity.
void OutputSlot::SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
{
    m_EdgeStrategies[connectionIndex] = strategy;
}
214 
216 {
217  return m_EdgeStrategies[connectionIdx];
218 }
219 
/// Constructs a Layer with the requested numbers of input and output slots.
/// @param numInputSlots  number of InputSlots to create; each stores its own index
///                       and a back-reference to this layer.
/// @param numOutputSlots number of OutputSlots to create, one per OutputHandler.
/// @param type           concrete LayerType of the subclass being constructed.
/// @param layout         accepted for interface compatibility but deliberately unused here.
/// @param name           optional layer name; nullptr is stored as an empty string.
Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             DataLayout layout,
             const char* name)
: m_OutputHandlers(numOutputSlots)
, m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_BackendId()
, m_BackendHint(EmptyOptional())
// Every layer gets a process-unique guid from the profiling service.
, m_Guid(arm::pipe::IProfilingService::GetNextGuid())
{
    IgnoreUnused(layout);
    m_InputSlots.reserve(numInputSlots);
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        m_InputSlots.emplace_back(*this, i);
    }

    m_OutputSlots.reserve(numOutputSlots);
    for (unsigned int i = 0; i < numOutputSlots; ++i)
    {
        // Each output slot wraps the handler created in the init-list above.
        m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
    }
}
246 
/// Convenience constructor: defaults the data layout to NCHW and delegates to the
/// main constructor (which ignores the layout argument anyway).
Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             const char* name)
: Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
{
}
254 
255 void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
256 {
257  for (auto&& inputSlot : GetInputSlots())
258  {
259  // The graph must be well-formed at this point.
260  ARMNN_ASSERT(inputSlot.GetConnection());
261  const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
262 
263  if (inputSlot.IsTensorInfoOverridden() && outputHandler.GetData())
264  {
265  auto handler = outputHandler.GetData()->DecorateTensorHandle(inputSlot.GetTensorInfo());
266 
267  if (handler)
268  {
269  // Add overridden TensorHandle
270  dataCollector.Push(handler.get(), inputSlot.GetTensorInfo());
271  continue;
272  }
273  }
274  // Add default TensorHandle
275  dataCollector.Push(outputHandler.GetData(), inputSlot.GetTensorInfo());
276  }
277 }
278 
279 void Layer::CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const
280 {
281  for (auto&& outputHandler : m_OutputHandlers)
282  {
283  outputHandler.CollectWorkloadOutputs(dataCollector);
284  }
285 }
286 
288 {
290 }
291 
293  const IWorkloadFactory& workloadFactory,
294  const bool IsMemoryManaged)
295 {
296  for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
297  {
298 
299  OutputSlot& slot = GetOutputSlot(idx);
301 
302  OutputHandler& handler = GetOutputHandler(idx);
303  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
304  {
305  handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
306  }
307  else
308  {
309  ITensorHandleFactory* handleFactory;
310  handleFactory = registry.GetFactory(factoryId);
311  ARMNN_ASSERT(handleFactory);
312  handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
313  }
314  }
315 }
316 
318 {
319  // Now free up the static data.
320  OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
321  {
322  handle.reset();
323  });
324 }
325 
327 {
328  if (GetNumInputSlots() > 0) // Ignore the input layer.
329  {
330  return GetInputSlot(0).GetTensorInfo().GetDataType();
331  }
333 }
334 
336 {
337  m_Priority = 0;
338  m_Visiting = false;
339 }
340 
// Body of Layer::GetPriority() (signature line lost in extraction; the symbol index
// declares it `LayerPriority GetPriority() const`). Recursively computes a topological
// priority for this layer — parents always have lower priority — memoised in m_Priority.
{
    constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
    constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();

    if (GetType() == LayerType::Input)
    {
        // Input layers anchor the walk at the lowest possible priority.
        m_Priority = inputPrio;
    }
    else if (GetType() == LayerType::Output)
    {
        // Output layers are pinned to the maximum priority.
        m_Priority = outputPrio;
    }
    else if (m_Priority == 0)
    {
        // 0 is the "not yet computed" sentinel (see ResetPriority).
        if (m_Visiting)
        {
            // Re-entering a layer mid-walk means the graph contains a cycle.
            throw GraphValidationException("Graph has circular dependencies: cannot walk");
        }

        // Fold over the input slots, keeping the highest priority among parent layers.
        auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
        {
            const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
            if (outputSlot)
            {
                const Layer& input = outputSlot->GetOwningLayer();
                return std::max(prio, input.GetPriority());
            }
            else
            {
                // unconnected input slot
                return prio;
            }
        };

        m_Visiting = true;
        LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
        m_Visiting = false;

        if (parentPrio >= outputPrio)
        {
            // parentPrio + 1 would collide with (or pass) the reserved Output priority.
            throw GraphValidationException("Graph has too many edges");
        }

        m_Priority = parentPrio + 1U;
    }

    return m_Priority;
}
390 
/// Asserts the layer has the expected number of input slots and that each is connected.
void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
{
    ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);

    for (unsigned int i=0; i<expectedConnections; ++i)
    {
        if (GetInputSlot(i).GetConnection() == nullptr)
        {
            // NOTE(review): the throw statement (presumably `throw LayerValidationException(`)
            // and the layer-type format argument were lost in extraction — this listing is
            // not compilable as-is; restore from the upstream file before editing.
            fmt::format("Input connection #{0} must be connected "
                        "for {1} layer {2} {3}",
                        i,
                        GetNameStr(),
                        location.AsString()));
        }
    }
}
409 
/// Base-class shape inference: echoes the input shapes back as the output shapes.
std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    // NOTE(review): the guard condition (presumably comparing GetNumInputSlots() against
    // GetNumOutputSlots()) and the `throw UnimplementedException(` line were lost in
    // extraction — this listing is not compilable as-is; restore from upstream before editing.

    // By default we return what we got, meaning the output shape(s) are the same as the input(s).
    // This only works if the number of inputs and outputs are the same. Since we are in the Layer
    // base class, this means the implementation needs to be overridden in the specific layers for
    // the other cases. So the missing implementation justifies the UnimplementedException.

    {
        fmt::format("Default implementation for InferOutputShapes can only be used for "
                    "layers with the same number of input and output slots. This doesn't "
                    "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
                    GetNameStr(),
                    CHECK_LOCATION().AsString()));
    }
    return inputShapes;
}
434 
435 void Layer::ValidateAndCopyShape(const TensorShape& outputShape,
436  const TensorShape& inferredShape,
437  const ShapeInferenceMethod shapeInferenceMethod,
438  const std::string& layerName,
439  const unsigned int outputSlotIndex)
440 {
441  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
442  {
443  if (m_AllowExpandedDims)
444  {
445  std::vector<unsigned int> outputDims = armnnUtils::SqueezeDims(outputShape);
446  std::vector<unsigned int> inferredDims = armnnUtils::SqueezeDims(inferredShape);
447 
448  if (outputDims.size() != inferredDims.size())
449  {
450  std::stringstream ss;
451  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
452  "] does not match the inferred shape. ";
453  ss << outputShape << " != " << inferredShape;
454  throw LayerValidationException(ss.str());
455  }
456  for (unsigned int i = 0; i < outputDims.size(); ++i)
457  {
458  if (outputDims[i] != inferredDims[i])
459  {
460  std::stringstream ss;
461  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
462  "] does not match the inferred shape at dimension index [";
463  ss << i << "] " << outputShape << " != " << inferredShape;
464  throw LayerValidationException(ss.str());
465  }
466  }
467  return;
468  }
469  else
470  {
471  ConditionalThrowIfNotEqual<LayerValidationException>(
472  layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
473  outputShape,
474  inferredShape);
475  return;
476  }
477  }
478 
479  if (outputShape.GetDimensionality() == Dimensionality::Specified)
480  {
481  for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
482  {
483  if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
484  {
485  std::stringstream ss;
486  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
487  "] does not match the inferred shape at dimension index [";
488  ss << i << "] " << outputShape << " != " << inferredShape;
489  throw LayerValidationException(ss.str());
490  }
491  }
492  }
493 
494  TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
495 
496  armnn::TensorInfo inferredTensorInfo(inferredShape,
497  info.GetDataType(),
498  info.GetQuantizationScale(),
499  info.GetQuantizationOffset());
500 
501  GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
502 }
503 
/// Checks that the given output shape is fully specified when only validation
/// (no inference) is permitted.
void Layer::VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod)
{
    if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
    {
        // NOTE(review): the first condition argument (presumably
        // `outputShape.GetDimensionality() != Dimensionality::NotSpecified,`) was lost in
        // extraction — this listing is not compilable as-is; restore from upstream.
        ConditionalThrow<LayerValidationException>(
            "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");

        // ValidateOnly requires every dimension to carry a concrete size.
        ConditionalThrow<LayerValidationException>(
            outputShape.AreAllDimensionsSpecified(),
            "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
    }
}
517 
519 {
520  std::string guid = std::to_string(m_Guid);
521  std::string layerType = GetLayerTypeAsCString(m_Type);
522  std::string backendId = std::string(m_BackendId);
523  if (!(guid.compare("") == 0) && !guid.empty())
524  {
525  fn("Guid", guid);
526  }
527  if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
528  {
529  fn("LayerName",m_LayerName);
530  }
531  if(!(layerType.compare("") == 0) && !layerType.empty())
532  {
533  fn("LayerType",layerType);
534  }
535  if(!(backendId.compare("") == 0) && !backendId.empty())
536  {
537  fn("BackendID",backendId);
538  }
539  std::shared_ptr<ActivationDescriptor>
540  activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
541 
542  if (activationDescPtr)
543  {
544  StringifyLayerParameters<ActivationDescriptor>::Serialize(fn, *activationDescPtr.get());
545  }
546 }
547 
// default implementation of ExecuteStrategy
/// Forwards this layer to the visitor with an empty BaseDescriptor and no constant
/// tensors; the layer's registered name is passed through unchanged.
void Layer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
}
553 
// Body of the non-const Layer::GetConstantTensorsByRef() (signature line lost in
// extraction; the symbol index declares it `virtual ConstantTensors GetConstantTensorsByRef()
// override final`). Builds the mutable view by delegating to the const overload.
{
    // Force the const overload to be selected for the delegation below.
    const Layer *constThis = const_cast<const Layer*>(this);
    ConstantTensors res;

    ImmutableConstantTensors immutableData = constThis->GetConstantTensorsByRef();
    for (auto i : immutableData)
    {
        // Each element wraps a const shared_ptr; strip the const so callers holding
        // a non-const Layer can mutate the handles.
        res.push_back(const_cast<std::shared_ptr<ConstTensorHandle>&>(i.get()));
    }
    return res;
}
566 
568 {
569  return m_OwningLayer;
570 }
571 
573 {
574  return m_OwningLayer;
575 }
576 
578 {
579  return m_OwningLayer;
580 }
581 
583 {
584  return m_OwningLayer;
585 }
586 
/// Stores an overriding TensorInfo on this input slot; once set, it takes precedence
/// over the connected OutputSlot's info in GetTensorInfo().
void InputSlot::SetTensorInfo(const TensorInfo tensorInfo)
{
    m_OverriddenTensorInfo = Optional<TensorInfo>(tensorInfo);
}
591 
// Body of InputSlot::GetTensorInfo() (signature line lost in extraction; declared
// `const TensorInfo& GetTensorInfo() const override` in the symbol index). Returns the
// overridden info when one was set, otherwise the connected OutputSlot's info.
{
    if (m_OverriddenTensorInfo.has_value())
    {
        return m_OverriddenTensorInfo.value();
    }
    else
    {
        // NOTE(review): dereferences GetConnection() without a null check — calling this
        // on an unconnected slot with no override would be undefined behaviour; confirm
        // callers guarantee a connection.
        return GetConnection()->GetTensorInfo();
    }
}
603 
605 {
606  return m_OverriddenTensorInfo.has_value() || (GetConnection() && GetConnection()->IsTensorInfoSet());
607 }
608 
610 {
611  return m_OverriddenTensorInfo.has_value();
612 }
613 
614 } // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::OutputSlot::SetTensorHandleFactory
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:200
armnn::InputSlot::SetTensorInfo
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition: Layer.cpp:587
arm
Definition: BackendRegistry.hpp:15
armnn::OutputSlot::operator==
bool operator==(const OutputSlot &other) const
Definition: Layer.cpp:172
armnn::Optional
Definition: Optional.hpp:270
armnn::GetLayerTypeAsCString
const char * GetLayerTypeAsCString(LayerType type)
Definition: InternalTypes.cpp:13
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
WorkloadData.hpp
armnnUtils::SqueezeDims
std::vector< unsigned int > SqueezeDims(const armnn::TensorShape &tensorShape)
Definition: TensorUtils.cpp:193
armnn::OutputSlot::ValidateTensorShape
bool ValidateTensorShape(const TensorShape &shape) const
Definition: Layer.cpp:106
armnn::OutputSlot::GetOutputHandler
const OutputHandler & GetOutputHandler() const
Definition: Layer.hpp:139
armnn::Layer::OperateOnConstantTensors
void OperateOnConstantTensors(Op op)
Definition: Layer.hpp:319
armnn::Layer::m_AdditionalInfoObject
AdditionalInfoObjectPtr m_AdditionalInfoObject
Definition: Layer.hpp:427
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
armnn::OutputSlot::SetTensorInfo
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:87
armnn::TensorShape::GetDimensionSpecificity
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
Definition: Tensor.cpp:211
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::LayerPriority
unsigned int LayerPriority
Definition: Layer.hpp:227
armnn::TensorInfo
Definition: Tensor.hpp:152
Graph.hpp
armnn::OutputHandler::SetTensorInfo
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
Definition: OutputHandler.cpp:15
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::Layer::ValidateAndCopyShape
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
Definition: Layer.cpp:435
armnn::OutputSlot::DisconnectAll
void DisconnectAll()
Definition: Layer.cpp:136
armnn::InputSlot::GetOwningIConnectableLayer
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:577
armnn::IConnectableLayer::ConstantTensors
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
Definition: INetwork.hpp:136
armnn::TensorShape::AreAllDimensionsSpecified
bool AreAllDimensionsSpecified() const
Checks if there is at least one dimension not specified.
Definition: Tensor.cpp:241
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
armnn::Layer::m_OutputHandlers
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:440
armnn::OutputHandler::CreateTensorHandles
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
Definition: OutputHandler.cpp:21
armnn::Layer::GetConstantTensorsByRef
virtual ConstantTensors GetConstantTensorsByRef() override final
Definition: Layer.cpp:554
armnn::OutputSlot::GetOwningIConnectableLayer
const IConnectableLayer & GetOwningIConnectableLayer() const override
Definition: Layer.cpp:567
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn::Layer::GetInputSlots
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:258
armnn::OutputSlot::Connect
int Connect(InputSlot &destination)
Definition: Layer.cpp:112
armnn::IStrategy
Definition: IStrategy.hpp:16
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::CheckLocation::AsString
std::string AsString() const
Definition: Exceptions.hpp:29
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
armnn::OutputSlot::IsTensorInfoSet
bool IsTensorInfoSet() const override
Definition: Layer.cpp:97
armnn::OutputSlot::GetEdgeStrategyForConnection
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition: Layer.cpp:215
armnn::OutputHandler::GetData
ITensorHandle * GetData() const
Gets the allocated tensor memory.
Definition: OutputHandler.hpp:46
armnn::Layer::Layer
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:247
NumericCast.hpp
armnn::Layer::CreateTensorHandles
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition: Layer.cpp:292
armnn::OutputSlot::GetOwningLayerGuid
LayerGuid GetOwningLayerGuid() const override
Definition: Layer.cpp:195
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
TensorUtils.hpp
armnn::Layer
Definition: Layer.hpp:230
armnn::IConnectableLayer::ImmutableConstantTensors
std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >> ImmutableConstantTensors
Definition: INetwork.hpp:141
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:592
armnn::ITensorHandle::DecorateTensorHandle
virtual std::shared_ptr< ITensorHandle > DecorateTensorHandle(const TensorInfo &tensorInfo)
Returns a decorated version of this TensorHandle allowing us to override the TensorInfo for it.
Definition: ITensorHandle.hpp:98
armnn::Layer::SerializeLayerParameters
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
Definition: Layer.cpp:518
armnn::OutputSlot::CalculateIndexOnOwner
unsigned int CalculateIndexOnOwner() const override
Definition: Layer.cpp:159
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:132
armnn::CheckLocation
Definition: Exceptions.hpp:14
armnn::EdgeStrategy::Undefined
@ Undefined
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::OutputSlot::Disconnect
void Disconnect(InputSlot &slot)
Definition: Layer.cpp:120
armnn::OutputHandler::IsTensorInfoSet
bool IsTensorInfoSet() const
Returns true if SetTensorInfo() has been called at least once on this.
Definition: OutputHandler.hpp:58
armnn::WorkloadDataCollector
Definition: WorkloadDataCollector.hpp:15
armnn::OutputSlot::GetNumConnections
unsigned int GetNumConnections() const override
Definition: Layer.hpp:158
armnn::EmptyOptional
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
Definition: Optional.hpp:32
armnn::InputSlot::IsTensorInfoSet
bool IsTensorInfoSet() const override
Returns true if this InputSlot either has an overridden TensorInfo for this InputSlot that was set th...
Definition: Layer.cpp:604
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::IWorkloadFactory
Definition: WorkloadFactory.hpp:22
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::QueueDescriptor
Definition: WorkloadData.hpp:24
armnn::EdgeStrategy
EdgeStrategy
Definition: ITensorHandleFactory.hpp:104
armnn::Layer::GetGuid
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:343
armnn::Layer::GetOutputHandler
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:245
armnn::Layer::GetNumOutputSlots
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:335
armnn::ParameterStringifyFunction
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Definition: SerializeLayerParameters.hpp:14
armnn::Layer::VerifyShapeInferenceType
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Definition: Layer.cpp:504
armnn::ITensorHandleFactory
Definition: ITensorHandleFactory.hpp:46
armnn::QueueDescriptor::m_AdditionalInfoObject
void * m_AdditionalInfoObject
Definition: WorkloadData.hpp:28
armnn::Layer::SetAdditionalInfo
void SetAdditionalInfo(QueueDescriptor &descriptor) const
Definition: Layer.cpp:287
armnn::StringifyLayerParameters::Serialize
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
Definition: SerializeLayerParameters.hpp:25
armnn::InputSlot::GetConnection
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:219
armnn::Dimensionality::NotSpecified
@ NotSpecified
armnn::BaseDescriptor
Base class for all descriptors.
Definition: Descriptors.hpp:22
armnn::InputSlot::IsTensorInfoOverridden
bool IsTensorInfoOverridden() const override
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
Definition: Layer.cpp:609
armnn::OutputHandler
Definition: OutputHandler.hpp:28
armnn::BoostLogSeverityMapping::info
@ info
armnn::WorkloadDataCollector::Push
void Push(ITensorHandle *handle, const TensorInfo &info)
Definition: WorkloadDataCollector.hpp:24
armnn::Layer::ReleaseConstantData
virtual void ReleaseConstantData()
Definition: Layer.cpp:317
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn::OutputSlot::MoveAllConnections
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition: Layer.cpp:145
armnn::Layer::GetNameStr
const std::string & GetNameStr() const
Definition: Layer.hpp:240
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::Layer::GetNumInputSlots
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:334
armnn::InputSlot
Definition: Layer.hpp:42
armnn::ShapeInferenceMethod::ValidateOnly
@ ValidateOnly
Validate all output shapes.
armnn::ShapeInferenceMethod::InferAndValidate
@ InferAndValidate
Infer missing output shapes and validate all output shapes.
armnn::InputSlot::Insert
void Insert(Layer &layer)
Definition: Layer.cpp:48
armnn::AssertNumberOfInputSlots
void AssertNumberOfInputSlots(Layer &layer)
Definition: Layer.cpp:28
armnn::LayerType::FullyConnected
@ FullyConnected
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::Layer::GetDataType
DataType GetDataType() const
Definition: Layer.cpp:326
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::Dimensionality::Specified
@ Specified
TensorHandle.hpp
armnn::InputSlot::SetConnection
void SetConnection(OutputSlot *source)
Links the slot to an output slot or breaks an existing link if passing nullptr.
Definition: Layer.hpp:63
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::Layer::ExecuteStrategy
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
Definition: Layer.cpp:549
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::OutputSlot::SetEdgeStrategy
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:210
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::Layer::ValidateTensorShapesFromInputs
virtual void ValidateTensorShapesFromInputs()=0
armnn::GraphValidationException
Definition: Exceptions.hpp:110
Layer.hpp
armnn::Layer::InferOutputShapes
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
Definition: Layer.cpp:410
armnn::Layer::GetPriority
LayerPriority GetPriority() const
Definition: Layer.cpp:341
armnn::ITensorHandleFactory::FactoryId
std::string FactoryId
Definition: ITensorHandleFactory.hpp:49
armnn::Layer::VerifyLayerConnections
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
Definition: Layer.cpp:391
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::LayerType::Input
@ Input
armnn::OutputHandler::GetTensorInfo
const TensorInfo & GetTensorInfo() const
Gets the matching TensorInfo for the output.
Definition: OutputHandler.hpp:42
armnn::UnimplementedException
Definition: Exceptions.hpp:98
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::ShapeInferenceMethod
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
Definition: Types.hpp:234
armnn::TensorShape::GetDimensionality
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Definition: Tensor.hpp:92
armnn::LayerType
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
Definition: Types.hpp:483
armnn::OutputSlot::GetConnection
const InputSlot * GetConnection(unsigned int index) const override
Definition: Layer.cpp:75
armnn::IStrategy::ExecuteStrategy
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
armnn::OutputHandler::CollectWorkloadOutputs
void CollectWorkloadOutputs(WorkloadDataCollector &dataCollector) const
Fill the outputs for a given queue descriptor.
Definition: OutputHandler.cpp:33
armnn::LayerType::Output
@ Output
armnn::Layer::ResetPriority
void ResetPriority() const
Definition: Layer.cpp:335
armnn::DataLayout::NCHW
@ NCHW