ArmNN
 24.05
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< ConstantLayer >
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = TransformIterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = TransformIterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = TransformIterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph (bool shapeInferenceMethod=false, bool allowExpandedDims=false)
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print (bool extended=false) const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer of type LayerT to the graph, constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and returns this graph. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under the output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void VerifyConstantLayerSetTensorInfo () const
 For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots. More...
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 
void SetLayersOutOfOrder ()
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Friends

class SubgraphView
 

Detailed Description

Definition at line 30 of file Graph.hpp.
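
Graph is an internal ArmNN class (it is not part of the public INetwork API). A minimal usage sketch, assuming the internal headers under src/armnn are on the include path and that the InputLayer and OutputLayer constructors take a binding id and a name:

    #include <Graph.hpp>
    #include <layers/InputLayer.hpp>    // assumed internal header locations
    #include <layers/OutputLayer.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    void BuildTrivialGraph()
    {
        armnn::Graph graph;

        // Add an input and an output layer (constructor arguments assumed as above).
        armnn::InputLayer*  input  = graph.AddLayer<armnn::InputLayer>(0, "input");
        armnn::OutputLayer* output = graph.AddLayer<armnn::OutputLayer>(0, "output");

        // Describe the tensor flowing along the only edge, then connect the slots.
        input->GetOutputSlot(0).SetTensorInfo(
            armnn::TensorInfo({ 1, 4 }, armnn::DataType::Float32));
        input->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // Sort topologically and log a summary of every layer and its slots.
        graph.TopologicalSort().Print();
    }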

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 56 of file Graph.hpp.

◆ ConstIteratorInputs

using ConstIteratorInputs = TransformIterator<decltype(&PtrCast<const InputLayer>), Iterator>

Definition at line 57 of file Graph.hpp.

◆ ConstIteratorOutputs

using ConstIteratorOutputs = TransformIterator<decltype(&PtrCast<const OutputLayer>), Iterator>

Definition at line 58 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 53 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 54 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool  shapeInferenceMethod = false,
bool  allowExpandedDims = false 
)
inline

Definition at line 98 of file Graph.hpp.

99  : m_LayersInOrder(true)
100  , m_AllowExpandedDims(allowExpandedDims)
101  , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
102                                                  ShapeInferenceMethod::ValidateOnly)
103  , m_Profiler(std::make_shared<IProfiler>())
104  {}

References armnn::InferAndValidate, and armnn::ValidateOnly.

◆ Graph() [2/3]

Graph ( const Graph &  other )

Definition at line 27 of file Graph.cpp.

28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55 
56  if (otherInputSlot->IsTensorInfoOverridden())
57  {
58  inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
59  }
60  outputSlot->Connect(inputSlot);
61  }
62  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
63  ++outputSlot;
64  }
65  }
66 }

References Layer::BeginOutputSlots(), Layer::Clone(), Layer::GetInputSlot(), and InputSlot::SetTensorInfo().

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 110 of file Graph.hpp.

111  {
112  *this = std::move(other);
113  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 142 of file Graph.hpp.

143  {
144  ForEachLayer([](Layer* layer)
145  {
146  delete layer;
147  });
148  }

References Graph::ForEachLayer().

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers.

Definition at line 330 of file Graph.cpp.

332 {
333  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
334  // connections to other layers).
335  auto MayNeedCompatibilityLayer = [](const Layer& layer)
336  {
337  // All layers should have been associated with a valid compute device at this point.
338  if (layer.GetBackendId() == Compute::Undefined)
339  {
340  throw armnn::Exception("AddCompatibilityLayers: All layers must be assigned to a backend at this point.");
341  }
342  // Does not need another compatibility layer if a copy or import layer is already present.
343  return layer.GetType() != LayerType::MemCopy &&
344  layer.GetType() != LayerType::MemImport;
345  };
346 
347  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
348  {
349  return strategy == EdgeStrategy::CopyToTarget ||
350  strategy == EdgeStrategy::ExportToTarget;
351  };
352 
353  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
354  {
355  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(srcLayer, "source layer must not be null.");
356 
357  if (!MayNeedCompatibilityLayer(*srcLayer))
358  {
359  // The current layer does not need copy layers, move to the next one
360  return;
361  }
362 
363  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
364  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
365  {
366  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
367  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
368  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
369  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
370  {
371  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
372  if (!dstInputSlot)
373  {
374  throw armnn::Exception("dstInputSlot must not be null.");
375  }
376 
377  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
378  if (strategy == EdgeStrategy::Undefined)
379  {
380  throw armnn::Exception("Undefined memory strategy found "
381  "while adding copy layers for compatibility");
382  }
383 
384  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
385  if (MayNeedCompatibilityLayer(dstLayer) &&
386  IsCompatibilityStrategy(strategy))
387  {
388  // A copy layer is needed in between the source and destination layers.
389  // Record the operation rather than attempting to modify the graph as we go.
390  // (invalidating iterators)
391  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
392  srcLayer->GetName(),
393  srcOutputIndex,
394  dstLayer.GetName(),
395  dstInputSlot->GetSlotIndex());
396  Layer* compLayer = nullptr;
397  if (strategy == EdgeStrategy::CopyToTarget)
398  {
399  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
400  }
401  else
402  {
403  if (strategy != EdgeStrategy::ExportToTarget)
404  {
405  throw armnn::Exception("Invalid edge strategy found.");
406  }
407 
408  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
409  }
410 
411  compLayer->SetBackendId(dstLayer.GetBackendId());
412 
413  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
414  auto backendIt = backends.find(dstLayer.GetBackendId());
415  if (backendIt != backends.end() &&
416  backendIt->second &&
417  backendIt->second->SupportsTensorAllocatorAPI())
418  {
419  auto backend = backendIt->second.get();
420  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
421  bool found = false;
422 
423  for (auto preference : tensorHandleFactoryIds)
424  {
425  auto factory = registry.GetFactory(preference);
426  if (factory)
427  {
428  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
429  auto srcFactory = registry.GetFactory(srcPref);
430 
431  if (srcFactory)
432  {
433  bool canExportImport =
434  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
435 
436  if (factory->SupportsMapUnmap() || canExportImport)
437  {
438  compOutputSlot.SetTensorHandleFactory(preference);
439  found = true;
440  break;
441  }
442  }
443  }
444  }
445 
446  if (!found)
447  {
448  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
449  }
450  }
451  else
452  {
453  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
454  }
455 
456  // The output strategy of a compatibility layer is always DirectCompatibility.
457  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
458 
459  // Recalculate the connection index on the previous layer as we have just inserted into it.
460  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
461  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
462  std::find(newSourceConnections.begin(),
463  newSourceConnections.end(),
464  &compLayer->GetInputSlot(0)));
465 
466  // The input strategy of a compatibility layer is always DirectCompatibilty.
 467  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
 468                                EdgeStrategy::DirectCompatibility);
469  }
470  }
471  }
472  });
473 }

References ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetInputSlot(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, Layer::SetBackendId(), OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by armnn::Optimize().
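
This step normally runs inside armnn::Optimize(), after every layer has been assigned a backend and every edge has been given an EdgeStrategy. A hedged sketch of the call itself, assuming the backends map and the registry have already been populated by those earlier steps (header locations are assumptions):

    #include <Graph.hpp>
    #include <armnn/backends/IBackendInternal.hpp>            // assumed header locations
    #include <armnn/backends/TensorHandleFactoryRegistry.hpp>

    #include <map>
    #include <memory>

    // Hypothetical helper: 'backends' and 'registry' are filled in elsewhere.
    void InsertCompatibilityLayers(armnn::Graph& graph,
                                   std::map<armnn::BackendId,
                                            std::unique_ptr<armnn::IBackendInternal>>& backends,
                                   armnn::TensorHandleFactoryRegistry& registry)
    {
        // Inserts a MemCopy or MemImport layer on every edge whose strategy is
        // CopyToTarget or ExportToTarget; other edges are left untouched.
        graph.AddCompatibilityLayers(backends, registry);
    }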

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline

Adds a new layer of type LayerT to the graph, constructed with the arguments passed.

Definition at line 466 of file Graph.hpp.

467 {
468  m_LayersInOrder = m_LayersInOrder &&
469  ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
470  LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
471 
472  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
473  layer->SetAllowExpandedDims(m_AllowExpandedDims);
474 
475  NotifyObservables(GraphEvent::LayerAdded, layer);
476 
477  return layer;
478 }

References armnn::Input, armnn::LayerAdded, and armnn::Output.

Referenced by Layer::CloneBase(), and FuseBatchNorm< ConvLayer, ArmnnType, T >::Run().

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under the output tensor handlers of each layer.

Definition at line 207 of file Graph.cpp.

208 {
209  // Layers must be sorted in topological order
210  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_LayersInOrder, "layers must be in order.");
211 
212  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
213 
214  std::unordered_set<const ITensorHandle*> preallocatedTensors;
215  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
216 
217  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
218  // is a TensorHandle, the function just returns it
219  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
220  {
221  ITensorHandle* ancestor = subTensorHandle;
222  while (ancestor && ancestor->GetParent())
223  {
224  ancestor = ancestor->GetParent();
225  }
226  return ancestor;
227  };
228 
229  // Checks whether a TensorHandle has been pre-allocated
230  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
231  {
232  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
233  };
234 
235  // Constant tensor handles need to last from the beginning of execution till the end,
236  // therefore we pre-allocate them upfront
237  for (auto&& layer : m_Layers)
238  {
239  if (layer->GetType() == LayerType::Constant)
240  {
241  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
242  {
243  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
244 
245  if (tensorHandle && !IsPreallocated(tensorHandle))
246  {
247  tensorHandle->Allocate();
248  preallocatedTensors.insert(tensorHandle);
249  }
250  }
251  }
252  }
253 
254  // Iterate over the network in topological order
255  for (auto&& layer : m_Layers)
256  {
257  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
258  // The first time we encounter a new tensor handle, we start managing its lifetime.
259  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
260  {
261  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
262 
263  if (tensorHandle && !IsPreallocated(tensorHandle))
264  {
265  unsigned int numConnections = slot->GetNumConnections();
266  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
267  {
268  handleReferenceCounts[tensorHandle] = numConnections;
269  tensorHandle->Manage();
270  if (handleReferenceCounts[tensorHandle] == 0u)
271  {
272  // if nobody consumes this tensor we call Allocate()
273  tensorHandle->Allocate();
274  }
275  }
276  else
277  {
278  handleReferenceCounts[tensorHandle] += numConnections;
279  }
280  }
281  }
282 
283  // Loop through the input slots in the same layer and decrement the reference counter associated
284  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
285  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
286  {
287  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
288  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
289 
290  if (tensorHandle && !IsPreallocated(tensorHandle))
291  {
292  --handleReferenceCounts[tensorHandle];
293 
294  if (handleReferenceCounts[tensorHandle] == 0u)
295  {
296  // Stop managing lifetime of tensor handle
297  tensorHandle->Allocate();
298  handleReferenceCounts.erase(tensorHandle);
299  }
300  }
301  }
302  }
303 
304  return Status::Success;
305 }

References ITensorHandle::Allocate(), ARMNN_SCOPED_PROFILING_EVENT, ARMNN_THROW_INVALIDARG_MSG_IF_FALSE, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), armnn::Success, and armnn::Undefined.
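
Judging by the LoadNetwork_AllocateDynamicBuffers profiling event in the listing above, this is typically invoked while a network is being loaded, once tensor handles have been created for each layer's output handler. A brief sketch, assuming that setup has already happened:

    // Sketch only: tensor handles are assumed to exist on the output handlers.
    armnn::Status PrepareBuffers(armnn::Graph& graph)
    {
        // AllocateDynamicBuffers() requires the layers to be in topological order.
        graph.TopologicalSort();
        return graph.AllocateDynamicBuffers();
    }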

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 226 of file Graph.hpp.

226  {
227  m_Views[notifyOnEvent].emplace_back(observable);
228  }

Referenced by GraphObservable< std::string >::GraphObservable().

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 176 of file Graph.hpp.

176 { return m_Layers.begin(); }

Referenced by Graph::cbegin(), armnn::Optimize(), and Optimizer::Pass().

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 181 of file Graph.hpp.

181 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 186 of file Graph.hpp.

186 { return begin(); }

References Graph::begin().

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 188 of file Graph.hpp.

188 { return end(); }

References Graph::end().
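
Because the const overloads of begin()/end() return a ConstIterator that applies PtrCast<const Layer>, a const Graph can be walked directly with a range-based for loop. A small sketch:

    #include <Graph.hpp>
    #include <iostream>

    void PrintLayerNames(const armnn::Graph& graph)
    {
        // Dereferencing a ConstIterator yields a const armnn::Layer*.
        for (const armnn::Layer* layer : graph)
        {
            std::cout << layer->GetName() << "\n";
        }
    }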

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 230 of file Graph.hpp.

230  {
231  m_Views[notifyOnEvent].remove(observable);
232  }

Referenced by GraphObservable< std::string >::~GraphObservable().

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 178 of file Graph.hpp.

178 { return m_Layers.end(); }

Referenced by Graph::cend(), armnn::Optimize(), and Optimizer::Pass().

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 183 of file Graph.hpp.

183 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

Deletes the layer at the specified position.

Definition at line 517 of file Graph.hpp.

518 {
519  NotifyObservables(GraphEvent::LayerErased, *pos);
520 
521  delete *pos;
522 }

References armnn::LayerErased.

Referenced by armnn::ApplyBackendOptimizations(), Graph::EraseLayer(), Optimizer::Pass(), OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), and OptimizeForExclusiveConnectionImpl< BaseType, ChildType, Wrapped >::Run().

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 525 of file Graph.hpp.

526 {
527  if (!layer)
528  {
529  throw armnn::NullPointerException("layer must not be null.");
530  }
531 
532  EraseLayer(GetPosInGraph(*layer));
533  layer = nullptr;
534 }

References Graph::EraseLayer(), and Graph::GetPosInGraph().
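
A small sketch of the pointer overload; the caller's pointer is reset so it cannot dangle:

    // 'layer' is assumed to point at a layer that belongs to 'graph'.
    void RemoveLayer(armnn::Graph& graph, armnn::Layer*& layer)
    {
        // Finds the layer via GetPosInGraph(), notifies observers, deletes it
        // and then sets the caller's pointer to nullptr.
        graph.EraseLayer(layer);
    }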

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 40 of file Graph.hpp.

41  {
42  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43  {
44  auto next = std::next(it);
45  func(*it);
46  it = next;
47  }
48  }

Referenced by Graph::AddCompatibilityLayers(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().
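
ForEachLayer() fetches the next iterator before invoking the callback, so the callback may safely erase the layer it is given (as ~Graph() does). A brief sketch that only inspects the layers:

    #include <Graph.hpp>
    #include <cstddef>
    #include <map>

    std::map<armnn::BackendId, std::size_t> CountLayersPerBackend(const armnn::Graph& graph)
    {
        std::map<armnn::BackendId, std::size_t> counts;
        graph.ForEachLayer([&counts](armnn::Layer* layer)
        {
            ++counts[layer->GetBackendId()];
        });
        return counts;
    }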

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 199 of file Graph.hpp.

199 { return InputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportInputs().
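
The accessor can be used directly in a range-based for loop (GetOutputLayers() works the same way for outputs). A sketch that collects the binding ids of all inputs, assuming InputLayer inherits GetBindingId() from BindableLayer:

    #include <Graph.hpp>
    #include <layers/InputLayer.hpp>   // assumed internal header location
    #include <armnn/Types.hpp>
    #include <vector>

    std::vector<armnn::LayerBindingId> CollectInputIds(const armnn::Graph& graph)
    {
        std::vector<armnn::LayerBindingId> ids;
        for (const armnn::InputLayer* input : graph.GetInputLayers())
        {
            ids.push_back(input->GetBindingId());
        }
        return ids;
    }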

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 194 of file Graph.hpp.

194 { return m_InputIds.size(); }

Referenced by Graph::InputLayersAccessor::end(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

Definition at line 205 of file Graph.hpp.

205 { return m_Layers.size(); }

Referenced by LoadedNetwork::EnqueueWorkload().

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 195 of file Graph.hpp.

195 { return m_OutputIds.size(); }

Referenced by Graph::OutputLayersAccessor::begin(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 203 of file Graph.hpp.

203 { return OutputLayersAccessor(*this); }

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportOutputs().

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer &  layer )
inline

Gets the position of a layer in the graph.

Definition at line 455 of file Graph.hpp.

456 {
457  auto it = m_PosInGraphMap.find(&layer);
458  if (it == m_PosInGraphMap.end())
459  {
460  throw armnn::Exception("unable to find layer in graph map.");
461  }
462  return it->second;
463 }

Referenced by Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

◆ GetProfiler()

const std::shared_ptr< IProfiler > & GetProfiler ( ) const

Definition at line 733 of file Graph.cpp.

734 {
735  return m_Profiler;
736 }

Referenced by armnn::Optimize().

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 645 of file Graph.cpp.

646 {
647  for (auto&& layer : TopologicalSort())
648  {
649  for (auto&& input : layer->GetInputSlots())
650  {
651  const IOutputSlot* source = input.GetConnectedOutputSlot();
652  if (source == NULL)
653  {
654  // Throws exception due to a layer input not being connected to an output slot.
655  // Verifies input slot weights and bias are set for FullyConnected layers.
656  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
657  }
658 
659  if (!source->IsTensorInfoSet())
660  {
661  std::ostringstream message;
662  message << "Output slot TensorInfo not set on "
663  << GetLayerTypeAsCString(layer->GetType())
664  << " layer "
665  << std::quoted(layer->GetName());
666  throw LayerValidationException(message.str());
667  }
668  }
669 
670  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
671  {
672  layer->ValidateTensorShapesFromInputs();
673  }
674  }
675 }

References armnn::GetLayerTypeAsCString(), and IOutputSlot::IsTensorInfoSet().

Referenced by armnn::Optimize().
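
Both VerifyConstantLayerSetTensorInfo() and InferTensorInfos() are run from armnn::Optimize(). A hedged sketch of that validation step, assuming every input and constant output slot already carries a TensorInfo:

    void ValidateShapes(armnn::Graph& graph)
    {
        // Throws LayerValidationException if a ConstantLayer output has no TensorInfo.
        graph.VerifyConstantLayerSetTensorInfo();

        // Walks the graph in topological order and validates (or infers) each
        // layer's output shapes from its inputs.
        graph.InferTensorInfos();
    }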

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot &  insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 481 of file Graph.hpp.

482 {
483  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
484  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
485  const Iterator pos = (parentOut != nullptr)
486  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
487  : GetPosInGraph(insertBefore.GetOwningLayer());
488  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
489  insertBefore.Insert(*layer);
490 
491  NotifyObservables(GraphEvent::LayerAdded, layer);
492 
493  return layer;
494 }

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), MaxMinIntoBoundedReluImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), MoveTransposeUpImpl::Run(), MovePermuteUpImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), and FuseBatchNorm< ConvLayer, ArmnnType, T >::Run().
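
A sketch in the spirit of the optimizations listed above, splicing a MemCopyLayer in front of a given input slot (MemCopyLayer's constructor is assumed to take only a name, as in the AddCompatibilityLayers() listing):

    #include <Graph.hpp>
    #include <layers/MemCopyLayer.hpp>   // assumed internal header location

    armnn::MemCopyLayer* InsertCopyBefore(armnn::Graph& graph, armnn::InputSlot& slot)
    {
        // The new layer ends up between 'slot' and whatever output slot currently
        // feeds it, and the insertion position keeps the topological order intact.
        return graph.InsertNewLayer<armnn::MemCopyLayer>(slot, "copy");
    }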

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot &  insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 497 of file Graph.hpp.

498 {
499  Layer& owningLayer = insertAfter.GetOwningLayer();
500 
501  const Iterator pos = std::next(GetPosInGraph(owningLayer));
502  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
503 
504  if (layer->GetNumInputSlots() != 1)
505  {
506  throw armnn::Exception("layer must only one input slot.");
507  }
508 
509  insertAfter.MoveAllConnections(layer->GetOutputSlot());
510  insertAfter.Connect(layer->GetInputSlot(0));
511 
512  NotifyObservables(GraphEvent::LayerAdded, layer);
513 
514  return layer;
515 }

References OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

◆ operator=() [1/2]

Graph & operator= ( const Graph &  other )
delete

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 115 of file Graph.hpp.

116  {
117  m_InputIds = std::move(other.m_InputIds);
118  m_OutputIds = std::move(other.m_OutputIds);
119  m_LayersInOrder = std::move(other.m_LayersInOrder);
120  m_Views = std::move(other.m_Views);
121  m_Profiler = std::move(other.m_Profiler);
122  m_AllowExpandedDims = other.m_AllowExpandedDims;
123  m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
124  other.ForEachLayer([this](Layer* otherLayer)
125  {
126  otherLayer->Reparent(*this, m_Layers.end());
127  });
128 
129  if (!other.m_PosInGraphMap.empty())
130  {
131  throw armnn::Exception("assignment positions in graph map must be empty.");
132  }
133 
134  if (!other.m_Layers.empty())
135  {
136  throw armnn::Exception("assignment layers must be empty.");
137  }
138 
139  return *this;
140  }

References Layer::Reparent().

◆ Print()

Status Print ( bool  extended = false) const

Definition at line 68 of file Graph.cpp.

69 {
70  if (m_Layers.empty())
71  {
72  ARMNN_LOG(info) << "\n Graph is empty.\n";
73  return Status::Success;
74  }
75  ARMNN_LOG(info) << "\n";
76  ARMNN_LOG(info) << "Walking Pattern: \n";
77 
78  for (auto&& it : TopologicalSort())
79  {
80  auto numInputSlots = it->GetNumInputSlots();
81  auto numOutputSlots = it->GetNumOutputSlots();
82 
83  std::string guid;
84  if (extended)
85  {
86  guid += ":";
87  guid += std::to_string(it->GetGuid());
88  }
89  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
90  << ":" << it->GetBackendId().Get()
91  << guid
92  << " has " << numInputSlots << " input slots"
93  << " and " << numOutputSlots << " output slots.";
94 
95  for (auto i : it->GetInputSlots())
96  {
97  std::ostringstream message;
98  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
99  unsigned int numDims = inputTensorShape.GetNumDimensions();
100 
101  message << "The input slot has shape [ ";
102  for (unsigned int dim=0; dim < numDims; dim++)
103  {
104  message << inputTensorShape[dim] << ",";
105  }
106  message << " ]";
107  if (extended)
108  {
109  message << " Scale: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationScale();
110  message << " Offset: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationOffset();
111  message << " The input slot is connected to: ";
112  message << i.GetConnectedOutputSlot()->GetOwningIConnectableLayer().GetGuid();
113  }
114  ARMNN_LOG(info) << message.str();
115  }
116 
117  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
118  {
119  const armnn::Layer *layer = it;
120  std::ostringstream message;
121  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
122  unsigned int numDims = outputTensorShape.GetNumDimensions();
123 
124  message << "The output slot has shape [ ";
125  for (unsigned int dim=0; dim < numDims; dim++)
126  {
127  message << outputTensorShape[dim] << ",";
128  }
129  message << " ]";
130  if (extended)
131  {
132  message << " Scale: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationScale();
133  message << " Offset: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationOffset();
134  message << " The output slot is connected to: ";
135  message << layer->GetOutputSlots()[i].GetConnection(0)->GetOwningIConnectableLayer().GetGuid();
136  }
137  ARMNN_LOG(info) << message.str();
138  }
139  ARMNN_LOG(info) << "\n";
140  }
141  ARMNN_LOG(info) << "\n\n";
142 
143  return Status::Success;
144 }

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder().

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inline static

Definition at line 34 of file Graph.hpp.

35  {
36  return PolymorphicDowncast<LayerType*>(layer);
37  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 146 of file Graph.cpp.

147 {
148  {
149  DotGraph graph(stream, "Optimized");
150 
151  {
152  // Default node attributes:
153  DotDefaults nodes(stream, "node");
154  nodes.GetAttributeSet()
155  .AddAttribute("shape", "record");
156  }
157 
158  {
159  // Default edge attributes:
160  DotDefaults edges(stream, "edge");
161  edges.GetAttributeSet()
162  .AddAttribute("fontsize", 8)
163  .AddAttribute("fontcolor", "blue")
164  .AddAttribute("fontname", "arial-bold");
165  }
166 
167  // First declares the nodes.
168  for (auto&& layer : m_Layers)
169  {
170  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
171  // Extracts the layer parameters.
172  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
173  node.GetContents().AddContent(name + " : " + value);
174  };
175  layer->SerializeLayerParameters(extractParams);
176  }
177 
178  // Second declares the edges.
179  for (auto&& layer : m_Layers)
180  {
181  LayerGuid toId = layer->GetGuid();
182 
183  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
184  {
185  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
186  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
187  DotEdge edge(stream, fromId, toId);
188 
189  // Now print the tensor shape on the edge.
190  {
191  // Constructs the label attribute with HTML markup.
192  std::stringstream ss;
193  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
194  edge.GetAttributeSet().AddAttribute("label", ss);
195  }
196  }
197  }
198  }
199 
200  if (stream.bad())
201  {
202  return Status::Failure;
203  }
204  return Status::Success;
205 }

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.
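
A short sketch that writes the Graphviz representation of a graph to disk; the file name is illustrative:

    #include <Graph.hpp>
    #include <fstream>
    #include <string>

    bool DumpGraphToDot(armnn::Graph& graph, const std::string& path)
    {
        std::ofstream file(path);   // e.g. "graph.dot"; render with: dot -Tpng graph.dot
        return graph.SerializeToDot(file) == armnn::Status::Success;
    }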

◆ SetLayersOutOfOrder()

void SetLayersOutOfOrder ( )

Definition at line 738 of file Graph.cpp.

739 {
740  m_LayersInOrder = false;
741 }

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
const SubgraphView &  substituteSubgraph 
)

Definition at line 486 of file Graph.cpp.

487 {
488  // Look through each layer in the new subgraph and add any that are not already a member of this graph
489  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
490  {
491  if (std::find(std::begin(m_Layers),
492  std::end(m_Layers),
493  iConnectableLayer) == std::end(m_Layers))
494  {
495  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
496  layer->Reparent(*this, m_Layers.end());
497  m_LayersInOrder = false;
498  }
499  });
500 
501  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
502  EraseSubgraphLayers(subgraph);
503  TopologicalSort();
504 }

References SubgraphView::ForEachIConnectableLayer(), and Graph::TopologicalSort().

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
IConnectableLayer *  substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 475 of file Graph.cpp.

476 {
477  ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
478 
479  // Create a new sub-graph with only the given layer, using
480  // the given sub-graph as a reference of which parent graph to use
481  SubgraphView substituteSubgraph(substituteLayer);
482 
483  SubstituteSubgraph(subgraph, substituteSubgraph);
484 }

References ARMNN_THROW_INVALIDARG_MSG_IF_FALSE.

Referenced by armnn::ApplyBackendOptimizations().

◆ TopologicalSort() [1/2]

Graph & TopologicalSort ( )
inline

Sorts layers in topological order and returns this graph.

Definition at line 191 of file Graph.hpp.

191 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }

References Graph::TopologicalSort().

Referenced by CheckOrder(), Optimizer::Pass(), Graph::Print(), Graph::SubstituteSubgraph(), and Graph::TopologicalSort().
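
TopologicalSort() returns the graph itself, so it can be chained straight into a range-based for loop. A sketch:

    #include <Graph.hpp>
    #include <iostream>

    void WalkInDependencyOrder(armnn::Graph& graph)
    {
        // Each layer is visited only after every layer that produces one of its inputs.
        for (armnn::Layer* layer : graph.TopologicalSort())
        {
            std::cout << layer->GetName() << "\n";
        }
    }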

◆ TopologicalSort() [2/2]

const Graph& TopologicalSort ( ) const

◆ VerifyConstantLayerSetTensorInfo()

void VerifyConstantLayerSetTensorInfo ( ) const

For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.

LayerValidationException thrown if no TensorInfo is set.

Exceptions
LayerValidationException   thrown if no TensorInfo is set.

Definition at line 622 of file Graph.cpp.

623 {
624  for (auto&& layer : TopologicalSort())
625  {
626  if (layer->GetType() == armnn::LayerType::Constant)
627  {
628  for (auto&& output: layer->GetOutputSlots())
629  {
630  if (!output.IsTensorInfoSet())
631  {
632  std::ostringstream message;
633  message << "Output slot TensorInfo not set on "
634  << GetLayerTypeAsCString(layer->GetType())
635  << " layer \""
636  << layer->GetName()
637  << "\"";
638  throw LayerValidationException(message.str());
639  }
640  }
641  }
642  }
643 }

References armnn::Constant, and armnn::GetLayerTypeAsCString().

Referenced by armnn::Optimize().

Friends And Related Function Documentation

◆ SubgraphView

friend class SubgraphView
friend

Definition at line 307 of file Graph.hpp.


The documentation for this class was generated from the following files:
Graph.hpp
Graph.cpp