ArmNN
 23.11
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< ConstantLayer >
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = TransformIterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = TransformIterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = TransformIterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph (bool shapeInferenceMethod=false, bool allowExpandedDims=false)
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print (bool extended=false) const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and returns this. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void VerifyConstantLayerSetTensorInfo () const
 For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots. More...
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 
void SetLayersOutOfOrder ()
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Friends

class SubgraphView
 

Detailed Description

Definition at line 30 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 56 of file Graph.hpp.

◆ ConstIteratorInputs

Definition at line 57 of file Graph.hpp.

◆ ConstIteratorOutputs

Definition at line 58 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 53 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 54 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool  shapeInferenceMethod = false,
bool  allowExpandedDims = false 
)
inline

Definition at line 98 of file Graph.hpp.

99  : m_LayersInOrder(true)
100  , m_AllowExpandedDims(allowExpandedDims)
101  , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
102  ShapeInferenceMethod::ValidateOnly)
103  , m_Profiler(std::make_shared<IProfiler>())
104  {}

References armnn::InferAndValidate, and armnn::ValidateOnly.

◆ Graph() [2/3]

Graph ( const Graph &  other)

Definition at line 27 of file Graph.cpp.

28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55 
56  if (otherInputSlot->IsTensorInfoOverridden())
57  {
58  inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
59  }
60  outputSlot->Connect(inputSlot);
61  }
62  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
63  ++outputSlot;
64  }
65  }
66 }

References Layer::BeginOutputSlots(), Layer::Clone(), Layer::GetInputSlot(), and InputSlot::SetTensorInfo().

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 110 of file Graph.hpp.

111  {
112  *this = std::move(other);
113  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 135 of file Graph.hpp.

136  {
137  ForEachLayer([](Layer* layer)
138  {
139  delete layer;
140  });
141  }

References Graph::ForEachLayer().

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via an intermediary copy layers.

Definition at line 329 of file Graph.cpp.

331 {
332  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
333  // connections to other layers).
334  auto MayNeedCompatibilityLayer = [](const Layer& layer)
335  {
336  // All layers should have been associated with a valid compute device at this point.
337  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
338  // Does not need another compatibility layer if a copy or import layer is already present.
339  return layer.GetType() != LayerType::MemCopy &&
340  layer.GetType() != LayerType::MemImport;
341  };
342 
343  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
344  {
345  return strategy == EdgeStrategy::CopyToTarget ||
346  strategy == EdgeStrategy::ExportToTarget;
347  };
348 
349  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
350  {
351  ARMNN_ASSERT(srcLayer);
352 
353  if (!MayNeedCompatibilityLayer(*srcLayer))
354  {
355  // The current layer does not need copy layers, move to the next one
356  return;
357  }
358 
359  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
360  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
361  {
362  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
363  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
364  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
365  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
366  {
367  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
368  ARMNN_ASSERT(dstInputSlot);
369 
370  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
371  ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
372  "Undefined memory strategy found while adding copy layers for compatibility");
373 
374  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
375  if (MayNeedCompatibilityLayer(dstLayer) &&
376  IsCompatibilityStrategy(strategy))
377  {
378  // A copy layer is needed in between the source and destination layers.
379  // Record the operation rather than attempting to modify the graph as we go.
380  // (invalidating iterators)
381  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
382  srcLayer->GetName(),
383  srcOutputIndex,
384  dstLayer.GetName(),
385  dstInputSlot->GetSlotIndex());
386  Layer* compLayer = nullptr;
387  if (strategy == EdgeStrategy::CopyToTarget)
388  {
389  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
390  }
391  else
392  {
393  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
394  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
395  }
396 
397  compLayer->SetBackendId(dstLayer.GetBackendId());
398 
399  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
400  auto backendIt = backends.find(dstLayer.GetBackendId());
401  if (backendIt != backends.end() &&
402  backendIt->second &&
403  backendIt->second->SupportsTensorAllocatorAPI())
404  {
405  auto backend = backendIt->second.get();
406  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
407  bool found = false;
408 
409  for (auto preference : tensorHandleFactoryIds)
410  {
411  auto factory = registry.GetFactory(preference);
412  if (factory)
413  {
414  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
415  auto srcFactory = registry.GetFactory(srcPref);
416 
417  if (srcFactory)
418  {
419  bool canExportImport =
420  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
421 
422  if (factory->SupportsMapUnmap() || canExportImport)
423  {
424  compOutputSlot.SetTensorHandleFactory(preference);
425  found = true;
426  break;
427  }
428  }
429  }
430  }
431 
432  if (!found)
433  {
434  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
435  }
436  }
437  else
438  {
439  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
440  }
441 
442  // The output strategy of a compatibility layer is always DirectCompatibility.
443  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
444 
445  // Recalculate the connection index on the previous layer as we have just inserted into it.
446  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
447  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
448  std::find(newSourceConnections.begin(),
449  newSourceConnections.end(),
450  &compLayer->GetInputSlot(0)));
451 
452  // The input strategy of a compatibility layer is always DirectCompatibilty.
453  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
454  EdgeStrategy::DirectCompatibility);
455  }
456  }
457  }
458  });
459 }

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetInputSlot(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, Layer::SetBackendId(), OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by armnn::Optimize().

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline

Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.

Definition at line 456 of file Graph.hpp.

457 {
458  m_LayersInOrder = m_LayersInOrder &&
459  ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
460  LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
461 
462  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
463  layer->SetAllowExpandedDims(m_AllowExpandedDims);
464 
465  NotifyObservables(GraphEvent::LayerAdded, layer);
466 
467  return layer;
468 }

References armnn::Input, armnn::LayerAdded, and armnn::Output.

Referenced by Layer::CloneBase(), and FuseBatchNorm< ConvLayer, ArmnnType, T >::Run().

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handlers of each layer.

Definition at line 207 of file Graph.cpp.

208 {
209  // Layers must be sorted in topological order
210  ARMNN_ASSERT(m_LayersInOrder);
211  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
212 
213  std::unordered_set<const ITensorHandle*> preallocatedTensors;
214  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
215 
216  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
217  // is a TensorHandle, the function just returns it
218  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
219  {
220  ITensorHandle* ancestor = subTensorHandle;
221  while (ancestor && ancestor->GetParent())
222  {
223  ancestor = ancestor->GetParent();
224  }
225  return ancestor;
226  };
227 
228  // Checks whether a TensorHandle has been pre-allocated
229  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
230  {
231  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
232  };
233 
234  // Constant tensor handles need to last from the beginning of execution till the end,
235  // therefore we pre-allocate them upfront
236  for (auto&& layer : m_Layers)
237  {
238  if (layer->GetType() == LayerType::Constant)
239  {
240  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
241  {
242  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
243 
244  if (tensorHandle && !IsPreallocated(tensorHandle))
245  {
246  tensorHandle->Allocate();
247  preallocatedTensors.insert(tensorHandle);
248  }
249  }
250  }
251  }
252 
253  // Iterate over the network in topological order
254  for (auto&& layer : m_Layers)
255  {
256  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
257  // The first time we encounter a new tensor handle, we start managing its lifetime.
258  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
259  {
260  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
261 
262  if (tensorHandle && !IsPreallocated(tensorHandle))
263  {
264  unsigned int numConnections = slot->GetNumConnections();
265  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
266  {
267  handleReferenceCounts[tensorHandle] = numConnections;
268  tensorHandle->Manage();
269  if (handleReferenceCounts[tensorHandle] == 0u)
270  {
271  // if nobody consumes this tensor we call Allocate()
272  tensorHandle->Allocate();
273  }
274  }
275  else
276  {
277  handleReferenceCounts[tensorHandle] += numConnections;
278  }
279  }
280  }
281 
282  // Loop through the input slots in the same layer and decrement the reference counter associated
283  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
284  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
285  {
286  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
287  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
288 
289  if (tensorHandle && !IsPreallocated(tensorHandle))
290  {
291  --handleReferenceCounts[tensorHandle];
292 
293  if (handleReferenceCounts[tensorHandle] == 0u)
294  {
295  // Stop managing lifetime of tensor handle
296  tensorHandle->Allocate();
297  handleReferenceCounts.erase(tensorHandle);
298  }
299  }
300  }
301  }
302 
303  return Status::Success;
304 }

References ITensorHandle::Allocate(), ARMNN_ASSERT, ARMNN_SCOPED_PROFILING_EVENT, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), armnn::Success, and armnn::Undefined.

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 219 of file Graph.hpp.

219  {
220  m_Views[notifyOnEvent].emplace_back(observable);
221  }

Referenced by GraphObservable< std::string >::GraphObservable().

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 169 of file Graph.hpp.

169 { return m_Layers.begin(); }

Referenced by Graph::cbegin(), armnn::Optimize(), and Optimizer::Pass().

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 174 of file Graph.hpp.

174 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 179 of file Graph.hpp.

179 { return begin(); }

References Graph::begin().

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 181 of file Graph.hpp.

181 { return end(); }

References Graph::end().

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 223 of file Graph.hpp.

223  {
224  m_Views[notifyOnEvent].remove(observable);
225  }

Referenced by GraphObservable< std::string >::~GraphObservable().

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 171 of file Graph.hpp.

171 { return m_Layers.end(); }

Referenced by Graph::cend(), armnn::Optimize(), and Optimizer::Pass().

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 176 of file Graph.hpp.

176 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

Deletes the layer at the specified position.

Definition at line 504 of file Graph.hpp.

505 {
506  NotifyObservables(GraphEvent::LayerErased, *pos);
507 
508  delete *pos;
509 }

References armnn::LayerErased.

Referenced by armnn::ApplyBackendOptimizations(), Graph::EraseLayer(), Optimizer::Pass(), OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), and OptimizeForExclusiveConnectionImpl< BaseType, ChildType, Wrapped >::Run().

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 512 of file Graph.hpp.

513 {
514  ARMNN_ASSERT(layer != nullptr);
515  EraseLayer(GetPosInGraph(*layer));
516  layer = nullptr;
517 }

References ARMNN_ASSERT, Graph::EraseLayer(), and Graph::GetPosInGraph().

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 40 of file Graph.hpp.

41  {
42  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43  {
44  auto next = std::next(it);
45  func(*it);
46  it = next;
47  }
48  }

Referenced by Graph::AddCompatibilityLayers(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 192 of file Graph.hpp.

192 { return InputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportInputs().

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 187 of file Graph.hpp.

187 { return m_InputIds.size(); }

Referenced by Graph::InputLayersAccessor::end(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

Definition at line 198 of file Graph.hpp.

198 { return m_Layers.size(); }

Referenced by LoadedNetwork::EnqueueWorkload().

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 188 of file Graph.hpp.

188 { return m_OutputIds.size(); }

Referenced by Graph::OutputLayersAccessor::begin(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 196 of file Graph.hpp.

196 { return OutputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportOutputs().

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer &  layer)
inline

Gets the position of a layer in the graph.

Definition at line 448 of file Graph.hpp.

449 {
450  auto it = m_PosInGraphMap.find(&layer);
451  ARMNN_ASSERT(it != m_PosInGraphMap.end());
452  return it->second;
453 }

References ARMNN_ASSERT.

Referenced by Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

◆ GetProfiler()

const std::shared_ptr< IProfiler > & GetProfiler ( ) const

Definition at line 692 of file Graph.cpp.

693 {
694  return m_Profiler;
695 }

Referenced by armnn::Optimize().

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 604 of file Graph.cpp.

605 {
606  for (auto&& layer : TopologicalSort())
607  {
608  for (auto&& input : layer->GetInputSlots())
609  {
610  const IOutputSlot* source = input.GetConnectedOutputSlot();
611  if (source == NULL)
612  {
613  // Throws exception due to a layer input not being connected to an output slot.
614  // Verifies input slot weights and bias are set for FullyConnected layers.
615  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
616  }
617 
618  if (!source->IsTensorInfoSet())
619  {
620  std::ostringstream message;
621  message << "Output slot TensorInfo not set on "
622  << GetLayerTypeAsCString(layer->GetType())
623  << " layer "
624  << std::quoted(layer->GetName());
625  throw LayerValidationException(message.str());
626  }
627  }
628 
629  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
630  {
631  layer->ValidateTensorShapesFromInputs();
632  }
633  }
634 }

References armnn::GetLayerTypeAsCString(), IOutputSlot::IsTensorInfoSet(), Graph::TopologicalSort(), and armnn::ValidateOnly.

Referenced by armnn::Optimize().

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot &  insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 471 of file Graph.hpp.

472 {
473  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
474  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
475  const Iterator pos = (parentOut != nullptr)
476  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
477  : GetPosInGraph(insertBefore.GetOwningLayer());
478  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
479  insertBefore.Insert(*layer);
480 
481  NotifyObservables(GraphEvent::LayerAdded, layer);
482 
483  return layer;
484 }

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), and AddBroadcastReshapeLayerImpl::Run().

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot &  insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 487 of file Graph.hpp.

488 {
489  Layer& owningLayer = insertAfter.GetOwningLayer();
490 
491  const Iterator pos = std::next(GetPosInGraph(owningLayer));
492  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
493 
494  ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
495 
496  insertAfter.MoveAllConnections(layer->GetOutputSlot());
497  insertAfter.Connect(layer->GetInputSlot(0));
498 
499  NotifyObservables(GraphEvent::LayerAdded, layer);
500 
501  return layer;
502 }

References ARMNN_ASSERT, OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

◆ operator=() [1/2]

Graph& operator= ( const Graph &  other)
delete

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 115 of file Graph.hpp.

116  {
117  m_InputIds = std::move(other.m_InputIds);
118  m_OutputIds = std::move(other.m_OutputIds);
119  m_LayersInOrder = std::move(other.m_LayersInOrder);
120  m_Views = std::move(other.m_Views);
121  m_Profiler = std::move(other.m_Profiler);
122  m_AllowExpandedDims = other.m_AllowExpandedDims;
123  m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
124  other.ForEachLayer([this](Layer* otherLayer)
125  {
126  otherLayer->Reparent(*this, m_Layers.end());
127  });
128 
129  ARMNN_ASSERT(other.m_PosInGraphMap.empty());
130  ARMNN_ASSERT(other.m_Layers.empty());
131 
132  return *this;
133  }

References ARMNN_ASSERT, and Layer::Reparent().

◆ Print()

Status Print ( bool  extended = false) const

Definition at line 68 of file Graph.cpp.

69 {
70  if (m_Layers.empty())
71  {
72  ARMNN_LOG(info) << "\n Graph is empty.\n";
73  return Status::Success;
74  }
75  ARMNN_LOG(info) << "\n";
76  ARMNN_LOG(info) << "Walking Pattern: \n";
77 
78  for (auto&& it : TopologicalSort())
79  {
80  auto numInputSlots = it->GetNumInputSlots();
81  auto numOutputSlots = it->GetNumOutputSlots();
82 
83  std::string guid;
84  if (extended)
85  {
86  guid += ":";
87  guid += std::to_string(it->GetGuid());
88  }
89  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
90  << ":" << it->GetBackendId().Get()
91  << guid
92  << " has " << numInputSlots << " input slots"
93  << " and " << numOutputSlots << " output slots.";
94 
95  for (auto i : it->GetInputSlots())
96  {
97  std::ostringstream message;
98  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
99  unsigned int numDims = inputTensorShape.GetNumDimensions();
100 
101  message << "The input slot has shape [ ";
102  for (unsigned int dim=0; dim < numDims; dim++)
103  {
104  message << inputTensorShape[dim] << ",";
105  }
106  message << " ]";
107  if (extended)
108  {
109  message << " Scale: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationScale();
110  message << " Offset: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationOffset();
111  message << " The input slot is connected to: ";
112  message << i.GetConnectedOutputSlot()->GetOwningIConnectableLayer().GetGuid();
113  }
114  ARMNN_LOG(info) << message.str();
115  }
116 
117  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
118  {
119  const armnn::Layer *layer = it;
120  std::ostringstream message;
121  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
122  unsigned int numDims = outputTensorShape.GetNumDimensions();
123 
124  message << "The output slot has shape [ ";
125  for (unsigned int dim=0; dim < numDims; dim++)
126  {
127  message << outputTensorShape[dim] << ",";
128  }
129  message << " ]";
130  if (extended)
131  {
132  message << " Scale: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationScale();
133  message << " Offset: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationOffset();
134  message << " The output slot is connected to: ";
135  message << layer->GetOutputSlots()[i].GetConnection(0)->GetOwningIConnectableLayer().GetGuid();
136  }
137  ARMNN_LOG(info) << message.str();
138  }
139  ARMNN_LOG(info) << "\n";
140  }
141  ARMNN_LOG(info) << "\n\n";
142 
143  return Status::Success;
144 }

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder().

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 34 of file Graph.hpp.

35  {
36  return PolymorphicDowncast<LayerType*>(layer);
37  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 146 of file Graph.cpp.

147 {
148  {
149  DotGraph graph(stream, "Optimized");
150 
151  {
152  // Default node attributes:
153  DotDefaults nodes(stream, "node");
154  nodes.GetAttributeSet()
155  .AddAttribute("shape", "record");
156  }
157 
158  {
159  // Default edge attributes:
160  DotDefaults edges(stream, "edge");
161  edges.GetAttributeSet()
162  .AddAttribute("fontsize", 8)
163  .AddAttribute("fontcolor", "blue")
164  .AddAttribute("fontname", "arial-bold");
165  }
166 
167  // First declares the nodes.
168  for (auto&& layer : m_Layers)
169  {
170  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
171  // Extracts the layer parameters.
172  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
173  node.GetContents().AddContent(name + " : " + value);
174  };
175  layer->SerializeLayerParameters(extractParams);
176  }
177 
178  // Second declares the edges.
179  for (auto&& layer : m_Layers)
180  {
181  LayerGuid toId = layer->GetGuid();
182 
183  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
184  {
185  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
186  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
187  DotEdge edge(stream, fromId, toId);
188 
189  // Now print the tensor shape on the edge.
190  {
191  // Constructs the label attribute with HTML markup.
192  std::stringstream ss;
193  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
194  edge.GetAttributeSet().AddAttribute("label", ss);
195  }
196  }
197  }
198  }
199 
200  if (stream.bad())
201  {
202  return Status::Failure;
203  }
204  return Status::Success;
205 }

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.

◆ SetLayersOutOfOrder()

void SetLayersOutOfOrder ( )

Definition at line 697 of file Graph.cpp.

698 {
    // Invalidate the cached topological ordering so the next call to
    // TopologicalSort() performs a fresh sort of the layer list.
699  m_LayersInOrder = false;
700 }

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView subgraph,
const SubgraphView substituteSubgraph 
)

Definition at line 472 of file Graph.cpp.

473 {
474  // Look through each layer in the new subgraph and add any that are not already a member of this graph
475  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
476  {
477  if (std::find(std::begin(m_Layers),
478  std::end(m_Layers),
479  iConnectableLayer) == std::end(m_Layers))
480  {
     // Layer not yet owned by this graph: re-parent it here (appended at the
     // end of m_Layers) and mark the topological ordering as stale.
481  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
482  layer->Reparent(*this, m_Layers.end());
483  m_LayersInOrder = false;
484  }
485  });
486 
     // Rewire the boundary connections of the old subgraph onto the substitute,
     // remove the replaced layers, then restore a topological ordering.
487  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
488  EraseSubgraphLayers(subgraph);
489  TopologicalSort();
490 }

References SubgraphView::ForEachIConnectableLayer(), and Graph::TopologicalSort().

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView subgraph,
IConnectableLayer substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 461 of file Graph.cpp.

462 {
     // Guard against a null substitute layer (caller programming error).
463  ARMNN_ASSERT(substituteLayer != nullptr);
464 
465  // Create a new sub-graph with only the given layer, using
466  // the given sub-graph as a reference of which parent graph to use
467  SubgraphView substituteSubgraph(substituteLayer);
468 
     // Delegate to the SubgraphView-based overload to perform the substitution.
469  SubstituteSubgraph(subgraph, substituteSubgraph);
470 }

References ARMNN_ASSERT.

Referenced by armnn::ApplyBackendOptimizations().

◆ TopologicalSort() [1/2]

const Graph & TopologicalSort ( )
inline

Sorts layers in topological order and return this.

Definition at line 184 of file Graph.hpp.

184 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; } // Delegates to the const overload to do the sorting, then returns *this as non-const.

References Graph::TopologicalSort().

Referenced by CheckOrder(), Graph::InferTensorInfos(), Optimizer::Pass(), Graph::Print(), Graph::SubstituteSubgraph(), Graph::TopologicalSort(), and Graph::VerifyConstantLayerSetTensorInfo().

◆ TopologicalSort() [2/2]

const Graph& TopologicalSort ( ) const

◆ VerifyConstantLayerSetTensorInfo()

void VerifyConstantLayerSetTensorInfo ( ) const

For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.

LayerValidationException thrown if no TensorInfo is set.

Exceptions
LayerValidationException

Definition at line 581 of file Graph.cpp.

582 {
     // Walk the layers in topological order; for every Constant layer, verify
     // each of its output slots has a TensorInfo set, throwing otherwise.
583  for (auto&& layer : TopologicalSort())
584  {
585  if (layer->GetType() == armnn::LayerType::Constant)
586  {
587  for (auto&& output: layer->GetOutputSlots())
588  {
589  if (!output.IsTensorInfoSet())
590  {
                 // Build a descriptive message naming the offending layer.
591  std::ostringstream message;
592  message << "Output slot TensorInfo not set on "
593  << GetLayerTypeAsCString(layer->GetType())
594  << " layer \""
595  << layer->GetName()
596  << "\"";
597  throw LayerValidationException(message.str());
598  }
599  }
600  }
601  }
602 }

References armnn::Constant, armnn::GetLayerTypeAsCString(), and Graph::TopologicalSort().

Referenced by armnn::Optimize().

Friends And Related Function Documentation

◆ SubgraphView

friend class SubgraphView
friend

Definition at line 300 of file Graph.hpp.


The documentation for this class was generated from the following files:
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::Compute::Undefined
@ Undefined
armnn::GetLayerTypeAsCString
const char * GetLayerTypeAsCString(LayerType type)
Definition: InternalTypes.cpp:13
armnn::Graph::ForEachLayer
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
armnn::EdgeStrategy::DirectCompatibility
@ DirectCompatibility
Destination backend can work directly with tensors on source backend.
armnn::Graph::GetPosInGraph
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:448
armnn::Graph::EraseLayer
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:504
armnn::Graph::SubstituteSubgraph
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:461
armnn::Graph::SubgraphView
friend class SubgraphView
Definition: Graph.hpp:300
armnn::GraphEvent::LayerAdded
@ LayerAdded
armnn::Graph::Iterator
LayerList::const_iterator Iterator
Definition: Graph.hpp:53
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::Layer
Definition: Layer.hpp:230
ARMNN_LOG
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
armnn::EdgeStrategy::CopyToTarget
@ CopyToTarget
Copy contents from source backend tensor to destination backend tensor.
armnn::Graph::begin
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:169
armnn::EdgeStrategy::Undefined
@ Undefined
No strategy has been defined. Used internally to verify integrity of optimizations.
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
armnn::EdgeStrategy
EdgeStrategy
Definition: ITensorHandleFactory.hpp:104
armnn::ParameterStringifyFunction
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Definition: SerializeLayerParameters.hpp:14
armnn::Status::Success
@ Success
armnn::Layer::GetOutputSlots
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:259
armnn::GraphEvent::LayerErased
@ LayerErased
armnn::LayerType::MemImport
@ MemImport
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::ShapeInferenceMethod::ValidateOnly
@ ValidateOnly
Validate all output shapes.
armnn::ShapeInferenceMethod::InferAndValidate
@ InferAndValidate
Infer missing output shapes and validate all output shapes.
armnn::Graph::end
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:171
armnn::Graph::Graph
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition: Graph.hpp:98
armnn::Graph::TopologicalSort
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
armnn::EdgeStrategy::ExportToTarget
@ ExportToTarget
Source backends tensor data can be exported to destination backend tensor without copy.
armnn::LayerType::MemCopy
@ MemCopy
armnn::LayerType::Input
@ Input
armnn::Status::Failure
@ Failure
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant