ArmNN
 23.08
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< ConstantLayer >
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = TransformIterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = TransformIterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = TransformIterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph (bool shapeInferenceMethod=false, bool allowExpandedDims=false)
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print () const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and return this. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void VerifyConstantLayerSetTensorInfo () const
 For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots. More...
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 
void SetLayersOutOfOrder ()
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Friends

class SubgraphView
 

Detailed Description

Definition at line 30 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 56 of file Graph.hpp.

◆ ConstIteratorInputs

Definition at line 57 of file Graph.hpp.

◆ ConstIteratorOutputs

Definition at line 58 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 53 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 54 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool  shapeInferenceMethod = false,
bool  allowExpandedDims = false 
)
inline

Definition at line 98 of file Graph.hpp.

99  : m_LayersInOrder(true)
100  , m_AllowExpandedDims(allowExpandedDims)
101  , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
102  ShapeInferenceMethod::ValidateOnly)
103  , m_Profiler(std::make_shared<IProfiler>())
104  {}

References armnn::InferAndValidate, and armnn::ValidateOnly.

◆ Graph() [2/3]

Graph ( const Graph &  other)

Definition at line 27 of file Graph.cpp.

28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55 
56  if (otherInputSlot->IsTensorInfoOverridden())
57  {
58  inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
59  }
60  outputSlot->Connect(inputSlot);
61  }
62  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
63  ++outputSlot;
64  }
65  }
66 }

References Layer::BeginOutputSlots(), Layer::Clone(), Layer::GetInputSlot(), and InputSlot::SetTensorInfo().

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 110 of file Graph.hpp.

111  {
112  *this = std::move(other);
113  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 135 of file Graph.hpp.

136  {
137  ForEachLayer([](Layer* layer)
138  {
139  delete layer;
140  });
141  }

References Graph::ForEachLayer().

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers.

Definition at line 308 of file Graph.cpp.

310 {
311  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
312  // connections to other layers).
313  auto MayNeedCompatibilityLayer = [](const Layer& layer)
314  {
315  // All layers should have been associated with a valid compute device at this point.
316  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
317  // Does not need another compatibility layer if a copy or import layer is already present.
318  return layer.GetType() != LayerType::MemCopy &&
319  layer.GetType() != LayerType::MemImport;
320  };
321 
322  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
323  {
324  return strategy == EdgeStrategy::CopyToTarget ||
325  strategy == EdgeStrategy::ExportToTarget;
326  };
327 
328  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
329  {
330  ARMNN_ASSERT(srcLayer);
331 
332  if (!MayNeedCompatibilityLayer(*srcLayer))
333  {
334  // The current layer does not need copy layers, move to the next one
335  return;
336  }
337 
338  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
339  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
340  {
341  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
342  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
343  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
344  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
345  {
346  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
347  ARMNN_ASSERT(dstInputSlot);
348 
349  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
350  ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
351  "Undefined memory strategy found while adding copy layers for compatibility");
352 
353  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
354  if (MayNeedCompatibilityLayer(dstLayer) &&
355  IsCompatibilityStrategy(strategy))
356  {
357  // A copy layer is needed in between the source and destination layers.
358  // Record the operation rather than attempting to modify the graph as we go.
359  // (invalidating iterators)
360  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
361  srcLayer->GetName(),
362  srcOutputIndex,
363  dstLayer.GetName(),
364  dstInputSlot->GetSlotIndex());
365  Layer* compLayer = nullptr;
366  if (strategy == EdgeStrategy::CopyToTarget)
367  {
368  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
369  }
370  else
371  {
372  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
373  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
374  }
375 
376  compLayer->SetBackendId(dstLayer.GetBackendId());
377 
378  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
379  auto backendIt = backends.find(dstLayer.GetBackendId());
380  if (backendIt != backends.end() &&
381  backendIt->second &&
382  backendIt->second->SupportsTensorAllocatorAPI())
383  {
384  auto backend = backendIt->second.get();
385  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
386  bool found = false;
387 
388  for (auto preference : tensorHandleFactoryIds)
389  {
390  auto factory = registry.GetFactory(preference);
391  if (factory)
392  {
393  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
394  auto srcFactory = registry.GetFactory(srcPref);
395 
396  if (srcFactory)
397  {
398  bool canExportImport =
399  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
400 
401  if (factory->SupportsMapUnmap() || canExportImport)
402  {
403  compOutputSlot.SetTensorHandleFactory(preference);
404  found = true;
405  break;
406  }
407  }
408  }
409  }
410 
411  if (!found)
412  {
413  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
414  }
415  }
416  else
417  {
418  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
419  }
420 
421  // The output strategy of a compatibility layer is always DirectCompatibility.
422  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
423 
424  // Recalculate the connection index on the previous layer as we have just inserted into it.
425  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
426  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
427  std::find(newSourceConnections.begin(),
428  newSourceConnections.end(),
429  &compLayer->GetInputSlot(0)));
430 
431  // The input strategy of a compatibility layer is always DirectCompatibilty.
432  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
433  EdgeStrategy::DirectCompatibility);
434  }
435  }
436  }
437  });
438 }

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetInputSlot(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, Layer::SetBackendId(), OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by armnn::Optimize().

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline

Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.

Definition at line 456 of file Graph.hpp.

457 {
458  m_LayersInOrder = m_LayersInOrder &&
459  ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
460  LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
461 
462  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
463  layer->SetAllowExpandedDims(m_AllowExpandedDims);
464 
465  NotifyObservables(GraphEvent::LayerAdded, layer);
466 
467  return layer;
468 }

References armnn::Input, armnn::LayerAdded, and armnn::Output.

Referenced by Layer::CloneBase(), and FuseBatchNorm< ConvLayer, ArmnnType, T >::Run().

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handlers of each layer.

Definition at line 186 of file Graph.cpp.

187 {
188  // Layers must be sorted in topological order
189  ARMNN_ASSERT(m_LayersInOrder);
190  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
191 
192  std::unordered_set<const ITensorHandle*> preallocatedTensors;
193  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
194 
195  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
196  // is a TensorHandle, the function just returns it
197  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
198  {
199  ITensorHandle* ancestor = subTensorHandle;
200  while (ancestor && ancestor->GetParent())
201  {
202  ancestor = ancestor->GetParent();
203  }
204  return ancestor;
205  };
206 
207  // Checks whether a TensorHandle has been pre-allocated
208  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
209  {
210  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
211  };
212 
213  // Constant tensor handles need to last from the beginning of execution till the end,
214  // therefore we pre-allocate them upfront
215  for (auto&& layer : m_Layers)
216  {
217  if (layer->GetType() == LayerType::Constant)
218  {
219  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
220  {
221  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
222 
223  if (tensorHandle && !IsPreallocated(tensorHandle))
224  {
225  tensorHandle->Allocate();
226  preallocatedTensors.insert(tensorHandle);
227  }
228  }
229  }
230  }
231 
232  // Iterate over the network in topological order
233  for (auto&& layer : m_Layers)
234  {
235  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
236  // The first time we encounter a new tensor handle, we start managing its lifetime.
237  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
238  {
239  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
240 
241  if (tensorHandle && !IsPreallocated(tensorHandle))
242  {
243  unsigned int numConnections = slot->GetNumConnections();
244  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
245  {
246  handleReferenceCounts[tensorHandle] = numConnections;
247  tensorHandle->Manage();
248  if (handleReferenceCounts[tensorHandle] == 0u)
249  {
250  // if nobody consumes this tensor we call Allocate()
251  tensorHandle->Allocate();
252  }
253  }
254  else
255  {
256  handleReferenceCounts[tensorHandle] += numConnections;
257  }
258  }
259  }
260 
261  // Loop through the input slots in the same layer and decrement the reference counter associated
262  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
263  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
264  {
265  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
266  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
267 
268  if (tensorHandle && !IsPreallocated(tensorHandle))
269  {
270  --handleReferenceCounts[tensorHandle];
271 
272  if (handleReferenceCounts[tensorHandle] == 0u)
273  {
274  // Stop managing lifetime of tensor handle
275  tensorHandle->Allocate();
276  handleReferenceCounts.erase(tensorHandle);
277  }
278  }
279  }
280  }
281 
282  return Status::Success;
283 }

References ITensorHandle::Allocate(), ARMNN_ASSERT, ARMNN_SCOPED_PROFILING_EVENT, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), armnn::Success, and armnn::Undefined.

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 219 of file Graph.hpp.

219  {
220  m_Views[notifyOnEvent].emplace_back(observable);
221  }

Referenced by GraphObservable< std::string >::GraphObservable().

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 169 of file Graph.hpp.

169 { return m_Layers.begin(); }

Referenced by Graph::cbegin(), armnn::Optimize(), and Optimizer::Pass().

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 174 of file Graph.hpp.

174 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 179 of file Graph.hpp.

179 { return begin(); }

References Graph::begin().

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 181 of file Graph.hpp.

181 { return end(); }

References Graph::end().

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 223 of file Graph.hpp.

223  {
224  m_Views[notifyOnEvent].remove(observable);
225  }

Referenced by GraphObservable< std::string >::~GraphObservable().

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 171 of file Graph.hpp.

171 { return m_Layers.end(); }

Referenced by Graph::cend(), armnn::Optimize(), and Optimizer::Pass().

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 176 of file Graph.hpp.

176 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

Deletes the layer at the specified position.

Definition at line 504 of file Graph.hpp.

505 {
506  NotifyObservables(GraphEvent::LayerErased, *pos);
507 
508  delete *pos;
509 }

References armnn::LayerErased.

Referenced by armnn::ApplyBackendOptimizations(), Graph::EraseLayer(), Optimizer::Pass(), OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), and OptimizeForExclusiveConnectionImpl< BaseType, ChildType, Wrapped >::Run().

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 512 of file Graph.hpp.

513 {
514  ARMNN_ASSERT(layer != nullptr);
515  EraseLayer(GetPosInGraph(*layer));
516  layer = nullptr;
517 }

References ARMNN_ASSERT, Graph::EraseLayer(), and Graph::GetPosInGraph().

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 40 of file Graph.hpp.

41  {
42  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43  {
44  auto next = std::next(it);
45  func(*it);
46  it = next;
47  }
48  }

Referenced by Graph::AddCompatibilityLayers(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 192 of file Graph.hpp.

192 { return InputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportInputs().

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 187 of file Graph.hpp.

187 { return m_InputIds.size(); }

Referenced by Graph::InputLayersAccessor::end(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

Definition at line 198 of file Graph.hpp.

198 { return m_Layers.size(); }

Referenced by LoadedNetwork::EnqueueWorkload().

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 188 of file Graph.hpp.

188 { return m_OutputIds.size(); }

Referenced by Graph::OutputLayersAccessor::begin(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 196 of file Graph.hpp.

196 { return OutputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportOutputs().

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer &  layer)
inline

Gets the position of a layer in the graph.

Definition at line 448 of file Graph.hpp.

449 {
450  auto it = m_PosInGraphMap.find(&layer);
451  ARMNN_ASSERT(it != m_PosInGraphMap.end());
452  return it->second;
453 }

References ARMNN_ASSERT.

Referenced by Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

◆ GetProfiler()

const std::shared_ptr< IProfiler > & GetProfiler ( ) const

Definition at line 671 of file Graph.cpp.

672 {
673  return m_Profiler;
674 }

Referenced by armnn::Optimize().

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 583 of file Graph.cpp.

584 {
585  for (auto&& layer : TopologicalSort())
586  {
587  for (auto&& input : layer->GetInputSlots())
588  {
589  const IOutputSlot* source = input.GetConnectedOutputSlot();
590  if (source == NULL)
591  {
592  // Throws exception due to a layer input not being connected to an output slot.
593  // Verifies input slot weights and bias are set for FullyConnected layers.
594  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
595  }
596 
597  if (!source->IsTensorInfoSet())
598  {
599  std::ostringstream message;
600  message << "Output slot TensorInfo not set on "
601  << GetLayerTypeAsCString(layer->GetType())
602  << " layer "
603  << std::quoted(layer->GetName());
604  throw LayerValidationException(message.str());
605  }
606  }
607 
608  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
609  {
610  layer->ValidateTensorShapesFromInputs();
611  }
612  }
613 }

References armnn::GetLayerTypeAsCString(), IOutputSlot::IsTensorInfoSet(), Graph::TopologicalSort(), and armnn::ValidateOnly.

Referenced by armnn::Optimize().

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot &  insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 471 of file Graph.hpp.

472 {
473  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
474  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
475  const Iterator pos = (parentOut != nullptr)
476  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
477  : GetPosInGraph(insertBefore.GetOwningLayer());
478  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
479  insertBefore.Insert(*layer);
480 
481  NotifyObservables(GraphEvent::LayerAdded, layer);
482 
483  return layer;
484 }

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), and AddBroadcastReshapeLayerImpl::Run().

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot &  insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 487 of file Graph.hpp.

488 {
489  Layer& owningLayer = insertAfter.GetOwningLayer();
490 
491  const Iterator pos = std::next(GetPosInGraph(owningLayer));
492  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
493 
494  ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
495 
496  insertAfter.MoveAllConnections(layer->GetOutputSlot());
497  insertAfter.Connect(layer->GetInputSlot(0));
498 
499  NotifyObservables(GraphEvent::LayerAdded, layer);
500 
501  return layer;
502 }

References ARMNN_ASSERT, OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

◆ operator=() [1/2]

Graph& operator= ( const Graph &  other)
delete

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 115 of file Graph.hpp.

116  {
117  m_InputIds = std::move(other.m_InputIds);
118  m_OutputIds = std::move(other.m_OutputIds);
119  m_LayersInOrder = std::move(other.m_LayersInOrder);
120  m_Views = std::move(other.m_Views);
121  m_Profiler = std::move(other.m_Profiler);
122  m_AllowExpandedDims = other.m_AllowExpandedDims;
123  m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
124  other.ForEachLayer([this](Layer* otherLayer)
125  {
126  otherLayer->Reparent(*this, m_Layers.end());
127  });
128 
129  ARMNN_ASSERT(other.m_PosInGraphMap.empty());
130  ARMNN_ASSERT(other.m_Layers.empty());
131 
132  return *this;
133  }

References ARMNN_ASSERT, and Layer::Reparent().

◆ Print()

Status Print ( ) const

Definition at line 68 of file Graph.cpp.

69 {
70  if (m_Layers.empty())
71  {
72  ARMNN_LOG(info) << "\n Graph is empty.\n";
73  return Status::Success;
74  }
75  ARMNN_LOG(info) << "\n";
76  ARMNN_LOG(info) << "Walking Pattern: \n";
77 
78  for (auto&& it : TopologicalSort())
79  {
80  auto numInputSlots = it->GetNumInputSlots();
81  auto numOutputSlots = it->GetNumOutputSlots();
82 
83  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
84  << ":" << it->GetBackendId().Get()
85  << " has " << numInputSlots << " input slots"
86  << " and " << numOutputSlots << " output slots.";
87 
88  for (auto i : it->GetInputSlots())
89  {
90  std::ostringstream message;
91  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
92  unsigned int numDims = inputTensorShape.GetNumDimensions();
93 
94  message << "The input slot has shape [ ";
95  for (unsigned int dim=0; dim < numDims; dim++)
96  {
97  message << inputTensorShape[dim] << ",";
98  }
99  message << " ]";
100  ARMNN_LOG(info) << message.str();
101  }
102 
103  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
104  {
105  const armnn::Layer *layer = it;
106  std::ostringstream message;
107  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
108  unsigned int numDims = outputTensorShape.GetNumDimensions();
109 
110  message << "The output slot has shape [ ";
111  for (unsigned int dim=0; dim < numDims; dim++)
112  {
113  message << outputTensorShape[dim] << ",";
114  }
115  message << " ]";
116  ARMNN_LOG(info) << message.str();
117  }
118  ARMNN_LOG(info) << "\n";
119  }
120  ARMNN_LOG(info) << "\n\n";
121 
122  return Status::Success;
123 }

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder().

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 34 of file Graph.hpp.

35  {
36  return PolymorphicDowncast<LayerType*>(layer);
37  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 125 of file Graph.cpp.

126 {
127  {
128  DotGraph graph(stream, "Optimized");
129 
130  {
131  // Default node attributes:
132  DotDefaults nodes(stream, "node");
133  nodes.GetAttributeSet()
134  .AddAttribute("shape", "record");
135  }
136 
137  {
138  // Default edge attributes:
139  DotDefaults edges(stream, "edge");
140  edges.GetAttributeSet()
141  .AddAttribute("fontsize", 8)
142  .AddAttribute("fontcolor", "blue")
143  .AddAttribute("fontname", "arial-bold");
144  }
145 
146  // First declares the nodes.
147  for (auto&& layer : m_Layers)
148  {
149  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
150  // Extracts the layer parameters.
151  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
152  node.GetContents().AddContent(name + " : " + value);
153  };
154  layer->SerializeLayerParameters(extractParams);
155  }
156 
157  // Second declares the edges.
158  for (auto&& layer : m_Layers)
159  {
160  LayerGuid toId = layer->GetGuid();
161 
162  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
163  {
164  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
165  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
166  DotEdge edge(stream, fromId, toId);
167 
168  // Now print the tensor shape on the edge.
169  {
170  // Constructs the label attribute with HTML markup.
171  std::stringstream ss;
172  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
173  edge.GetAttributeSet().AddAttribute("label", ss);
174  }
175  }
176  }
177  }
178 
179  if (stream.bad())
180  {
181  return Status::Failure;
182  }
183  return Status::Success;
184 }

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.

◆ SetLayersOutOfOrder()

void SetLayersOutOfOrder ( )

Definition at line 676 of file Graph.cpp.

677 {
678  m_LayersInOrder = false;
679 }

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
const SubgraphView & substituteSubgraph 
)

Definition at line 451 of file Graph.cpp.

452 {
453  // Look through each layer in the new subgraph and add any that are not already a member of this graph
454  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
455  {
456  if (std::find(std::begin(m_Layers),
457  std::end(m_Layers),
458  iConnectableLayer) == std::end(m_Layers))
459  {
460  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
461  layer->Reparent(*this, m_Layers.end());
462  m_LayersInOrder = false;
463  }
464  });
465 
466  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
467  EraseSubgraphLayers(subgraph);
468  TopologicalSort();
469 }

References SubgraphView::ForEachIConnectableLayer(), and Graph::TopologicalSort().

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
IConnectableLayer * substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 440 of file Graph.cpp.

441 {
442  ARMNN_ASSERT(substituteLayer != nullptr);
443 
444  // Create a new sub-graph with only the given layer, using
445  // the given sub-graph as a reference of which parent graph to use
446  SubgraphView substituteSubgraph(substituteLayer);
447 
448  SubstituteSubgraph(subgraph, substituteSubgraph);
449 }

References ARMNN_ASSERT.

Referenced by armnn::ApplyBackendOptimizations().

◆ TopologicalSort() [1/2]

Graph & TopologicalSort ( )
inline

Sorts the layers in topological order and returns this graph.

Definition at line 184 of file Graph.hpp.

184 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }

References Graph::TopologicalSort().

Referenced by CheckOrder(), Graph::InferTensorInfos(), Optimizer::Pass(), Graph::Print(), Graph::SubstituteSubgraph(), Graph::TopologicalSort(), and Graph::VerifyConstantLayerSetTensorInfo().

◆ TopologicalSort() [2/2]

const Graph& TopologicalSort ( ) const

◆ VerifyConstantLayerSetTensorInfo()

void VerifyConstantLayerSetTensorInfo ( ) const

For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.

LayerValidationException thrown if no TensorInfo is set.

The exception message names the layer type and layer name of the offending ConstantLayer output slot.

Exceptions
LayerValidationException

Definition at line 560 of file Graph.cpp.

561 {
562  for (auto&& layer : TopologicalSort())
563  {
564  if (layer->GetType() == armnn::LayerType::Constant)
565  {
566  for (auto&& output: layer->GetOutputSlots())
567  {
568  if (!output.IsTensorInfoSet())
569  {
570  std::ostringstream message;
571  message << "Output slot TensorInfo not set on "
572  << GetLayerTypeAsCString(layer->GetType())
573  << " layer \""
574  << layer->GetName()
575  << "\"";
576  throw LayerValidationException(message.str());
577  }
578  }
579  }
580  }
581 }

References armnn::Constant, armnn::GetLayerTypeAsCString(), and Graph::TopologicalSort().

Referenced by armnn::Optimize().

Friends And Related Function Documentation

◆ SubgraphView

friend class SubgraphView
friend

Definition at line 300 of file Graph.hpp.


The documentation for this class was generated from the following files:
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::Compute::Undefined
@ Undefined
armnn::GetLayerTypeAsCString
const char * GetLayerTypeAsCString(LayerType type)
Definition: InternalTypes.cpp:13
armnn::Graph::ForEachLayer
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
armnn::EdgeStrategy::DirectCompatibility
@ DirectCompatibility
Destination backend can work directly with tensors on source backend.
armnn::Graph::GetPosInGraph
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:448
armnn::Graph::EraseLayer
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:504
armnn::Graph::SubstituteSubgraph
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:440
armnn::Graph::SubgraphView
friend class SubgraphView
Definition: Graph.hpp:300
armnn::GraphEvent::LayerAdded
@ LayerAdded
armnn::Graph::Iterator
LayerList::const_iterator Iterator
Definition: Graph.hpp:53
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::Layer
Definition: Layer.hpp:230
ARMNN_LOG
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
armnn::EdgeStrategy::CopyToTarget
@ CopyToTarget
Copy contents of source backend tensor to destination backend tensor manually.
armnn::Graph::begin
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:169
armnn::EdgeStrategy::Undefined
@ Undefined
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
armnn::EdgeStrategy
EdgeStrategy
Definition: ITensorHandleFactory.hpp:104
armnn::ParameterStringifyFunction
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Definition: SerializeLayerParameters.hpp:14
armnn::Status::Success
@ Success
armnn::Layer::GetOutputSlots
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:259
armnn::GraphEvent::LayerErased
@ LayerErased
armnn::LayerType::MemImport
@ MemImport
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::ShapeInferenceMethod::ValidateOnly
@ ValidateOnly
Validate all output shapes.
armnn::ShapeInferenceMethod::InferAndValidate
@ InferAndValidate
Infer missing output shapes and validate all output shapes.
armnn::Graph::end
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:171
armnn::Graph::Graph
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition: Graph.hpp:98
armnn::Graph::TopologicalSort
Graph & TopologicalSort()
Sorts the layers in topological order and returns this graph.
Definition: Graph.hpp:184
armnn::EdgeStrategy::ExportToTarget
@ ExportToTarget
Source backends tensor data can be exported to destination backend tensor without copy.
armnn::LayerType::MemCopy
@ MemCopy
armnn::LayerType::Input
@ Input
armnn::Status::Failure
@ Failure
armnn::LayerType::Output
@ Output
armnn::LayerType::Constant
@ Constant