ArmNN
 23.05
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< ConstantLayer >
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = TransformIterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = TransformIterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = TransformIterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph (bool shapeInferenceMethod=false, bool allowExpandedDims=false)
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print () const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer of the given type, constructed with the arguments passed, to the graph. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and returns this graph. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under the output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges that connect layers assigned to different compute devices and relinking them via intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void VerifyConstantLayerSetTensorInfo () const
 For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots. More...
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 
void SetLayersOutOfOrder ()
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Friends

class SubgraphView
 

Detailed Description

Definition at line 30 of file Graph.hpp.
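
A minimal usage sketch (not taken from the ArmNN sources; the layer choices, tensor shape and include paths are illustrative, and Graph is an internal class living under src/armnn rather than the public API): a graph is typically built by adding layers and wiring their slots together.

    #include <Graph.hpp>
    #include <layers/InputLayer.hpp>
    #include <layers/OutputLayer.hpp>
    #include <armnn/Tensor.hpp>

    void BuildTrivialGraph()
    {
        armnn::Graph graph;

        // Input and output layers are constructed with a binding id and a name.
        armnn::InputLayer*  input  = graph.AddLayer<armnn::InputLayer>(0, "input");
        armnn::OutputLayer* output = graph.AddLayer<armnn::OutputLayer>(0, "output");

        // Connect the input's output slot to the output layer and describe the tensor.
        input->GetOutputSlot().Connect(output->GetInputSlot(0));
        input->GetOutputSlot().SetTensorInfo(armnn::TensorInfo({ 1, 4 }, armnn::DataType::Float32));

        graph.Print();
    }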

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 56 of file Graph.hpp.

◆ ConstIteratorInputs

using ConstIteratorInputs = TransformIterator<decltype(&PtrCast<const InputLayer>), Iterator>

Definition at line 57 of file Graph.hpp.

◆ ConstIteratorOutputs

using ConstIteratorOutputs = TransformIterator<decltype(&PtrCast<const OutputLayer>), Iterator>

Definition at line 58 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 53 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 54 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool  shapeInferenceMethod = false,
bool  allowExpandedDims = false 
)
inline

Definition at line 98 of file Graph.hpp.

99  : m_LayersInOrder(true)
100  , m_AllowExpandedDims(allowExpandedDims)
 101  , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
 102  ShapeInferenceMethod::ValidateOnly)
 103  , m_Profiler(std::make_shared<IProfiler>())
104  {}

References armnn::InferAndValidate, and armnn::ValidateOnly.
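
A short sketch (illustrative, not from the reference) of how the boolean arguments map onto the stored settings:

    armnn::Graph graph;                    // ShapeInferenceMethod::ValidateOnly, expanded dims not allowed
    armnn::Graph inferringGraph(true);     // ShapeInferenceMethod::InferAndValidate
    armnn::Graph relaxedGraph(true, true); // InferAndValidate, expanded dims allowed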

◆ Graph() [2/3]

Graph ( const Graph &  other)

Definition at line 27 of file Graph.cpp.

28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55  outputSlot->Connect(inputSlot);
56  }
57  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
58  ++outputSlot;
59  }
60  }
61 }

References Layer::BeginOutputSlots(), Layer::Clone(), and Layer::GetInputSlot().

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 110 of file Graph.hpp.

111  {
112  *this = std::move(other);
113  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 135 of file Graph.hpp.

136  {
137  ForEachLayer([](Layer* layer)
138  {
139  delete layer;
140  });
141  }

References Graph::ForEachLayer().

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges that connect layers assigned to different compute devices and relinking them via intermediary copy layers.

Definition at line 303 of file Graph.cpp.

305 {
306  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
307  // connections to other layers).
308  auto MayNeedCompatibilityLayer = [](const Layer& layer)
309  {
310  // All layers should have been associated with a valid compute device at this point.
311  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
312  // Does not need another compatibility layer if a copy or import layer is already present.
313  return layer.GetType() != LayerType::MemCopy &&
314  layer.GetType() != LayerType::MemImport;
315  };
316 
317  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
318  {
319  return strategy == EdgeStrategy::CopyToTarget ||
320  strategy == EdgeStrategy::ExportToTarget;
321  };
322 
323  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
324  {
325  ARMNN_ASSERT(srcLayer);
326 
327  if (!MayNeedCompatibilityLayer(*srcLayer))
328  {
329  // The current layer does not need copy layers, move to the next one
330  return;
331  }
332 
333  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
334  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
335  {
336  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
337  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
338  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
339  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
340  {
341  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
342  ARMNN_ASSERT(dstInputSlot);
343 
 344  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
 345  ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
 346  "Undefined memory strategy found while adding copy layers for compatibility");
347 
348  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
349  if (MayNeedCompatibilityLayer(dstLayer) &&
350  IsCompatibilityStrategy(strategy))
351  {
352  // A copy layer is needed in between the source and destination layers.
353  // Record the operation rather than attempting to modify the graph as we go.
354  // (invalidating iterators)
355  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
356  srcLayer->GetName(),
357  srcOutputIndex,
358  dstLayer.GetName(),
359  dstInputSlot->GetSlotIndex());
360  Layer* compLayer = nullptr;
361  if (strategy == EdgeStrategy::CopyToTarget)
362  {
363  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
364  }
365  else
366  {
367  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
368  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
369  }
370 
371  compLayer->SetBackendId(dstLayer.GetBackendId());
372 
373  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
374  auto backendIt = backends.find(dstLayer.GetBackendId());
375  if (backendIt != backends.end() &&
376  backendIt->second &&
377  backendIt->second->SupportsTensorAllocatorAPI())
378  {
379  auto backend = backendIt->second.get();
380  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
381  bool found = false;
382 
383  for (auto preference : tensorHandleFactoryIds)
384  {
385  auto factory = registry.GetFactory(preference);
386  if (factory)
387  {
388  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
389  auto srcFactory = registry.GetFactory(srcPref);
390 
391  if (srcFactory)
392  {
393  bool canExportImport =
394  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
395 
396  if (factory->SupportsMapUnmap() || canExportImport)
397  {
398  compOutputSlot.SetTensorHandleFactory(preference);
399  found = true;
400  break;
401  }
402  }
403  }
404  }
405 
406  if (!found)
407  {
408  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
409  }
410  }
411  else
412  {
413  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
414  }
415 
416  // The output strategy of a compatibility layer is always DirectCompatibility.
417  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
418 
419  // Recalculate the connection index on the previous layer as we have just inserted into it.
420  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
421  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
422  std::find(newSourceConnections.begin(),
423  newSourceConnections.end(),
424  &compLayer->GetInputSlot(0)));
425 
426  // The input strategy of a compatibility layer is always DirectCompatibilty.
 427  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
 428  EdgeStrategy::DirectCompatibility);
 429  }
430  }
431  }
432  });
433 }

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetInputSlot(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, Layer::SetBackendId(), OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by armnn::Optimize().

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline

Adds a new layer of the given type, constructed with the arguments passed, to the graph.

Definition at line 456 of file Graph.hpp.

457 {
458  m_LayersInOrder = m_LayersInOrder &&
459  ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
460  LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
461 
462  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
463  layer->SetAllowExpandedDims(m_AllowExpandedDims);
464 
465  NotifyObservables(GraphEvent::LayerAdded, layer);
466 
467  return layer;
468 }

References armnn::Input, armnn::LayerAdded, and armnn::Output.

Referenced by Layer::CloneBase(), and FuseBatchNorm< ConvLayer, ArmnnType, T >::Run().
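
A hedged usage sketch (the graph, descriptor and names are illustrative, and the relevant layer headers are assumed to be included): the arguments after the template parameter are forwarded to the layer's constructor.

    armnn::Graph graph;
    armnn::InputLayer* input = graph.AddLayer<armnn::InputLayer>(0, "input");

    armnn::ActivationDescriptor reluDesc;
    reluDesc.m_Function = armnn::ActivationFunction::ReLu;
    armnn::ActivationLayer* relu = graph.AddLayer<armnn::ActivationLayer>(reluDesc, "relu");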

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under the output tensor handlers of each layer.

Definition at line 181 of file Graph.cpp.

182 {
183  // Layers must be sorted in topological order
184  ARMNN_ASSERT(m_LayersInOrder);
185  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
186 
187  std::unordered_set<const ITensorHandle*> preallocatedTensors;
188  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
189 
190  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
191  // is a TensorHandle, the function just returns it
192  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
193  {
194  ITensorHandle* ancestor = subTensorHandle;
195  while (ancestor && ancestor->GetParent())
196  {
197  ancestor = ancestor->GetParent();
198  }
199  return ancestor;
200  };
201 
202  // Checks whether a TensorHandle has been pre-allocated
203  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
204  {
205  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
206  };
207 
208  // Constant tensor handles need to last from the beginning of execution till the end,
209  // therefore we pre-allocate them upfront
210  for (auto&& layer : m_Layers)
211  {
212  if (layer->GetType() == LayerType::Constant)
213  {
214  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
215  {
216  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
217 
218  if (tensorHandle && !IsPreallocated(tensorHandle))
219  {
220  tensorHandle->Allocate();
221  preallocatedTensors.insert(tensorHandle);
222  }
223  }
224  }
225  }
226 
227  // Iterate over the network in topological order
228  for (auto&& layer : m_Layers)
229  {
230  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
231  // The first time we encounter a new tensor handle, we start managing its lifetime.
232  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
233  {
234  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
235 
236  if (tensorHandle && !IsPreallocated(tensorHandle))
237  {
238  unsigned int numConnections = slot->GetNumConnections();
239  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
240  {
241  handleReferenceCounts[tensorHandle] = numConnections;
242  tensorHandle->Manage();
243  if (handleReferenceCounts[tensorHandle] == 0u)
244  {
245  // if nobody consumes this tensor we call Allocate()
246  tensorHandle->Allocate();
247  }
248  }
249  else
250  {
251  handleReferenceCounts[tensorHandle] += numConnections;
252  }
253  }
254  }
255 
256  // Loop through the input slots in the same layer and decrement the reference counter associated
257  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
258  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
259  {
260  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
261  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
262 
263  if (tensorHandle && !IsPreallocated(tensorHandle))
264  {
265  --handleReferenceCounts[tensorHandle];
266 
267  if (handleReferenceCounts[tensorHandle] == 0u)
268  {
269  // Stop managing lifetime of tensor handle
270  tensorHandle->Allocate();
271  handleReferenceCounts.erase(tensorHandle);
272  }
273  }
274  }
275  }
276 
277  return Status::Success;
278 }

References ITensorHandle::Allocate(), ARMNN_ASSERT, ARMNN_SCOPED_PROFILING_EVENT, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), armnn::Success, and armnn::Undefined.

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 219 of file Graph.hpp.

219  {
220  m_Views[notifyOnEvent].emplace_back(observable);
221  }

Referenced by GraphObservable< std::string >::GraphObservable().

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 169 of file Graph.hpp.

169 { return m_Layers.begin(); }

Referenced by Graph::cbegin(), armnn::Optimize(), and Optimizer::Pass().
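
A hedged sketch (the graph variable is assumed): because begin()/end() expose the underlying layer list, the whole graph can be walked with a range-based for loop. The iteration order is the list order, which is only guaranteed to be topological once TopologicalSort() has been called.

    for (armnn::Layer* layer : graph)
    {
        std::cout << layer->GetName() << "\n"; // assumes <iostream>
    }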

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 174 of file Graph.hpp.

174 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 179 of file Graph.hpp.

179 { return begin(); }

References Graph::begin().

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 181 of file Graph.hpp.

181 { return end(); }

References Graph::end().

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 223 of file Graph.hpp.

223  {
224  m_Views[notifyOnEvent].remove(observable);
225  }

Referenced by GraphObservable< std::string >::~GraphObservable().

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 171 of file Graph.hpp.

171 { return m_Layers.end(); }

Referenced by Graph::cend(), armnn::Optimize(), and Optimizer::Pass().

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 176 of file Graph.hpp.

176 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

Deletes the layer at the specified position.

Definition at line 504 of file Graph.hpp.

505 {
506  NotifyObservables(GraphEvent::LayerErased, *pos);
507 
508  delete *pos;
509 }

References armnn::LayerErased.

Referenced by Graph::EraseLayer(), Optimizer::Pass(), OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), and OptimizeForExclusiveConnectionImpl< BaseType, ChildType, Wrapped >::Run().

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 512 of file Graph.hpp.

513 {
514  ARMNN_ASSERT(layer != nullptr);
515  EraseLayer(GetPosInGraph(*layer));
516  layer = nullptr;
517 }

References ARMNN_ASSERT, Graph::EraseLayer(), and Graph::GetPosInGraph().
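
A hedged sketch (the layer creation is illustrative): this templated overload removes the layer from the graph, deletes it, and resets the caller's pointer so it cannot dangle.

    armnn::ActivationLayer* relu =
        graph.AddLayer<armnn::ActivationLayer>(armnn::ActivationDescriptor(), "relu");
    graph.EraseLayer(relu); // relu is nullptr after this call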

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 40 of file Graph.hpp.

41  {
42  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43  {
44  auto next = std::next(it);
45  func(*it);
46  it = next;
47  }
48  }

Referenced by Graph::AddCompatibilityLayers(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().
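
A hedged usage sketch: ForEachLayer captures the next iterator before invoking the functor, so the functor may safely erase the layer it is given (as ~Graph() itself does). Here it is used read-only to count layers of one type:

    size_t convolutionCount = 0;
    graph.ForEachLayer([&convolutionCount](armnn::Layer* layer)
    {
        if (layer->GetType() == armnn::LayerType::Convolution2d)
        {
            ++convolutionCount;
        }
    });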

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 192 of file Graph.hpp.

192 { return InputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportInputs().

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 187 of file Graph.hpp.

187 { return m_InputIds.size(); }

Referenced by Graph::InputLayersAccessor::end(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

Definition at line 198 of file Graph.hpp.

198 { return m_Layers.size(); }

Referenced by LoadedNetwork::EnqueueWorkload().

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 188 of file Graph.hpp.

188 { return m_OutputIds.size(); }

Referenced by Graph::OutputLayersAccessor::begin(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 196 of file Graph.hpp.

196 { return OutputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportOutputs().
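
A hedged sketch covering both accessors (the surrounding graph variable is assumed): the returned wrappers only expose begin()/end(), so they are intended for range-based for loops over the bound input and output layers.

    std::vector<armnn::LayerBindingId> inputIds;
    for (const armnn::InputLayer* inputLayer : graph.GetInputLayers())
    {
        inputIds.push_back(inputLayer->GetBindingId());
    }

    std::vector<armnn::LayerBindingId> outputIds;
    for (const armnn::OutputLayer* outputLayer : graph.GetOutputLayers())
    {
        outputIds.push_back(outputLayer->GetBindingId());
    }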

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer &  layer)
inline

Gets the position of a layer in the graph.

Definition at line 448 of file Graph.hpp.

449 {
450  auto it = m_PosInGraphMap.find(&layer);
451  ARMNN_ASSERT(it != m_PosInGraphMap.end());
452  return it->second;
453 }

References ARMNN_ASSERT.

Referenced by Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

◆ GetProfiler()

const std::shared_ptr< IProfiler > & GetProfiler ( ) const

Definition at line 656 of file Graph.cpp.

657 {
658  return m_Profiler;
659 }

Referenced by armnn::Optimize().

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 568 of file Graph.cpp.

569 {
570  for (auto&& layer : TopologicalSort())
571  {
572  for (auto&& input : layer->GetInputSlots())
573  {
574  const IOutputSlot* source = input.GetConnectedOutputSlot();
575  if (source == NULL)
576  {
577  // Throws exception due to a layer input not being connected to an output slot.
578  // Verifies input slot weights and bias are set for FullyConnected layers.
579  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
580  }
581 
582  if (!source->IsTensorInfoSet())
583  {
584  std::ostringstream message;
585  message << "Output slot TensorInfo not set on "
586  << GetLayerTypeAsCString(layer->GetType())
587  << " layer "
588  << std::quoted(layer->GetName());
589  throw LayerValidationException(message.str());
590  }
591  }
592 
593  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
594  {
595  layer->ValidateTensorShapesFromInputs();
596  }
597  }
598 }

References armnn::GetLayerTypeAsCString(), IOutputSlot::IsTensorInfoSet(), Graph::TopologicalSort(), and armnn::ValidateOnly.

Referenced by armnn::Optimize().

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot &  insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 471 of file Graph.hpp.

472 {
473  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
474  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
475  const Iterator pos = (parentOut != nullptr)
476  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
477  : GetPosInGraph(insertBefore.GetOwningLayer());
478  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
479  insertBefore.Insert(*layer);
480 
481  NotifyObservables(GraphEvent::LayerAdded, layer);
482 
483  return layer;
484 }

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), and AddBroadcastReshapeLayerImpl::Run().
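
A hedged sketch (the existing layer and the chosen layer type are illustrative): the new layer is spliced into the connection that feeds the given input slot, keeping the topological order intact.

    // Insert a conversion layer in front of the first input slot of an existing layer.
    armnn::ConvertFp16ToFp32Layer* converter =
        graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(existingLayer->GetInputSlot(0),
                                                            "fp16_to_fp32");

The OutputSlot overload below is the counterpart for splicing a layer in after a producer, rewiring all of that output slot's existing connections through the new layer.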

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot &  insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 487 of file Graph.hpp.

488 {
489  Layer& owningLayer = insertAfter.GetOwningLayer();
490 
491  const Iterator pos = std::next(GetPosInGraph(owningLayer));
492  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
493 
494  ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
495 
496  insertAfter.MoveAllConnections(layer->GetOutputSlot());
497  insertAfter.Connect(layer->GetInputSlot(0));
498 
499  NotifyObservables(GraphEvent::LayerAdded, layer);
500 
501  return layer;
502 }

References ARMNN_ASSERT, OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

◆ operator=() [1/2]

Graph & operator= ( const Graph &  other)
delete

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 115 of file Graph.hpp.

116  {
117  m_InputIds = std::move(other.m_InputIds);
118  m_OutputIds = std::move(other.m_OutputIds);
119  m_LayersInOrder = std::move(other.m_LayersInOrder);
120  m_Views = std::move(other.m_Views);
121  m_Profiler = std::move(other.m_Profiler);
122  m_AllowExpandedDims = other.m_AllowExpandedDims;
123  m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
124  other.ForEachLayer([this](Layer* otherLayer)
125  {
126  otherLayer->Reparent(*this, m_Layers.end());
127  });
128 
129  ARMNN_ASSERT(other.m_PosInGraphMap.empty());
130  ARMNN_ASSERT(other.m_Layers.empty());
131 
132  return *this;
133  }

References ARMNN_ASSERT, and Layer::Reparent().

◆ Print()

Status Print ( ) const

Definition at line 63 of file Graph.cpp.

64 {
65  if (m_Layers.empty())
66  {
67  ARMNN_LOG(info) << "\n Graph is empty.\n";
68  return Status::Success;
69  }
70  ARMNN_LOG(info) << "\n";
71  ARMNN_LOG(info) << "Walking Pattern: \n";
72 
73  for (auto&& it : TopologicalSort())
74  {
75  auto numInputSlots = it->GetNumInputSlots();
76  auto numOutputSlots = it->GetNumOutputSlots();
77 
78  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
79  << ":" << it->GetBackendId().Get()
80  << " has " << numInputSlots << " input slots"
81  << " and " << numOutputSlots << " output slots.";
82 
83  for (auto i : it->GetInputSlots())
84  {
85  std::ostringstream message;
86  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
87  unsigned int numDims = inputTensorShape.GetNumDimensions();
88 
89  message << "The input slot has shape [ ";
90  for (unsigned int dim=0; dim < numDims; dim++)
91  {
92  message << inputTensorShape[dim] << ",";
93  }
94  message << " ]";
95  ARMNN_LOG(info) << message.str();
96  }
97 
98  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
99  {
100  const armnn::Layer *layer = it;
101  std::ostringstream message;
102  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
103  unsigned int numDims = outputTensorShape.GetNumDimensions();
104 
105  message << "The output slot has shape [ ";
106  for (unsigned int dim=0; dim < numDims; dim++)
107  {
108  message << outputTensorShape[dim] << ",";
109  }
110  message << " ]";
111  ARMNN_LOG(info) << message.str();
112  }
113  ARMNN_LOG(info) << "\n";
114  }
115  ARMNN_LOG(info) << "\n\n";
116 
117  return Status::Success;
118 }

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder().

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 34 of file Graph.hpp.

35  {
36  return PolymorphicDowncast<LayerType*>(layer);
37  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 120 of file Graph.cpp.

121 {
122  {
123  DotGraph graph(stream, "Optimized");
124 
125  {
126  // Default node attributes:
127  DotDefaults nodes(stream, "node");
128  nodes.GetAttributeSet()
129  .AddAttribute("shape", "record");
130  }
131 
132  {
133  // Default edge attributes:
134  DotDefaults edges(stream, "edge");
135  edges.GetAttributeSet()
136  .AddAttribute("fontsize", 8)
137  .AddAttribute("fontcolor", "blue")
138  .AddAttribute("fontname", "arial-bold");
139  }
140 
141  // First declares the nodes.
142  for (auto&& layer : m_Layers)
143  {
144  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
145  // Extracts the layer parameters.
146  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
147  node.GetContents().AddContent(name + " : " + value);
148  };
149  layer->SerializeLayerParameters(extractParams);
150  }
151 
152  // Second declares the edges.
153  for (auto&& layer : m_Layers)
154  {
155  LayerGuid toId = layer->GetGuid();
156 
157  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
158  {
159  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
160  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
161  DotEdge edge(stream, fromId, toId);
162 
163  // Now print the tensor shape on the edge.
164  {
165  // Constructs the label attribute with HTML markup.
166  std::stringstream ss;
167  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
168  edge.GetAttributeSet().AddAttribute("label", ss);
169  }
170  }
171  }
172  }
173 
174  if (stream.bad())
175  {
176  return Status::Failure;
177  }
178  return Status::Success;
179 }

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.
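
A hedged usage sketch (the file name is illustrative): the graph is written in GraphViz dot format and the returned Status reflects whether the stream stayed healthy.

    std::ofstream dotFile("graph.dot"); // assumes <fstream>
    if (graph.SerializeToDot(dotFile) != armnn::Status::Success)
    {
        // handle the write failure
    }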

◆ SetLayersOutOfOrder()

void SetLayersOutOfOrder ( )

Definition at line 661 of file Graph.cpp.

662 {
663  m_LayersInOrder = false;
664 }

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
const SubgraphView &  substituteSubgraph 
)

Definition at line 446 of file Graph.cpp.

447 {
448  // Look through each layer in the new subgraph and add any that are not already a member of this graph
449  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
450  {
451  if (std::find(std::begin(m_Layers),
452  std::end(m_Layers),
453  iConnectableLayer) == std::end(m_Layers))
454  {
455  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
456  layer->Reparent(*this, m_Layers.end());
457  m_LayersInOrder = false;
458  }
459  });
460 
461  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
462  EraseSubgraphLayers(subgraph);
463  TopologicalSort();
464 }

References SubgraphView::ForEachIConnectableLayer(), and Graph::TopologicalSort().

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
IConnectableLayer *  substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 435 of file Graph.cpp.

436 {
437  ARMNN_ASSERT(substituteLayer != nullptr);
438 
439  // Create a new sub-graph with only the given layer, using
440  // the given sub-graph as a reference of which parent graph to use
441  SubgraphView substituteSubgraph(substituteLayer);
442 
443  SubstituteSubgraph(subgraph, substituteSubgraph);
444 }

References ARMNN_ASSERT.

Referenced by armnn::ApplyBackendOptimizations().

◆ TopologicalSort() [1/2]

Graph & TopologicalSort ( )
inline

Sorts layers in topological order and returns this graph.

Definition at line 184 of file Graph.hpp.

184 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }

References Graph::TopologicalSort().

Referenced by CheckOrder(), Graph::InferTensorInfos(), Optimizer::Pass(), Graph::Print(), Graph::SubstituteSubgraph(), Graph::TopologicalSort(), and Graph::VerifyConstantLayerSetTensorInfo().
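
A hedged sketch (the graph variable is assumed): the non-const overload returns the graph itself, so sorting and iteration compose in a single range-based for loop.

    for (armnn::Layer* layer : graph.TopologicalSort())
    {
        // each layer is visited only after the layers producing its inputs
    }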

◆ TopologicalSort() [2/2]

const Graph& TopologicalSort ( ) const

◆ VerifyConstantLayerSetTensorInfo()

void VerifyConstantLayerSetTensorInfo ( ) const

For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.

LayerValidationException thrown if no TensorInfo is set.

Exceptions
LayerValidationException

Definition at line 545 of file Graph.cpp.

546 {
547  for (auto&& layer : TopologicalSort())
548  {
549  if (layer->GetType() == armnn::LayerType::Constant)
550  {
551  for (auto&& output: layer->GetOutputSlots())
552  {
553  if (!output.IsTensorInfoSet())
554  {
555  std::ostringstream message;
556  message << "Output slot TensorInfo not set on "
557  << GetLayerTypeAsCString(layer->GetType())
558  << " layer \""
559  << layer->GetName()
560  << "\"";
561  throw LayerValidationException(message.str());
562  }
563  }
564  }
565  }
566 }

References armnn::Constant, armnn::GetLayerTypeAsCString(), and Graph::TopologicalSort().

Referenced by armnn::Optimize().

Friends And Related Function Documentation

◆ SubgraphView

friend class SubgraphView
friend

Definition at line 300 of file Graph.hpp.


The documentation for this class was generated from the following files:
Graph.hpp
Graph.cpp