ArmNN
 20.02
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = boost::transform_iterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = boost::transform_iterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = boost::transform_iterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph ()
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print () const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and returns this. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Detailed Description

Definition at line 29 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = boost::transform_iterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 53 of file Graph.hpp.

◆ ConstIteratorInputs

using ConstIteratorInputs = boost::transform_iterator<decltype(&PtrCast<const InputLayer>), Iterator>

Definition at line 54 of file Graph.hpp.

◆ ConstIteratorOutputs

using ConstIteratorOutputs = boost::transform_iterator<decltype(&PtrCast<const OutputLayer>), Iterator>

Definition at line 55 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 50 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 51 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 49 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( )
inline

Definition at line 95 of file Graph.hpp.

References Graph::operator=().

95 : m_LayersInOrder(true) {}

◆ Graph() [2/3]

Graph ( const Graph &  other)

Definition at line 28 of file Graph.cpp.

References Layer::BeginOutputSlots(), Layer::Clone(), and Layer::GetInputSlot().

29 : m_LayersInOrder(other.m_LayersInOrder)
30 {
31  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
32 
33  for (auto&& otherLayer : other.m_Layers)
34  {
35  Layer* const layer = otherLayer->Clone(*this);
36  otherToClonedMap.emplace(otherLayer, layer);
37  }
38 
39  // Copies slot connections.
40  for (auto&& otherLayer : other.m_Layers)
41  {
42  Layer* const thisLayer = otherToClonedMap[otherLayer];
43 
44  auto outputSlot = thisLayer->BeginOutputSlots();
45  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
46  {
47  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
48  {
49  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
50  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
51 
52  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
53  outputSlot->Connect(inputSlot);
54  }
55  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
56  ++outputSlot;
57  }
58  }
59 }

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 101 of file Graph.hpp.

102  {
103  *this = std::move(other);
104  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 124 of file Graph.hpp.

References Graph::AddLayer(), Graph::EraseLayer(), Graph::ForEachLayer(), Graph::InsertNewLayer(), Graph::Print(), and Graph::SerializeToDot().

125  {
126  ForEachLayer([](Layer* layer)
127  {
128  delete layer;
129  });
130  }

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers.

Definition at line 263 of file Graph.cpp.

References armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), and Graph::GetNumLayers().

265 {
266  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
267  // connections to other layers).
268  auto MayNeedCompatibilityLayer = [](const Layer& layer)
269  {
270  // All layers should have been associated with a valid compute device at this point.
271  BOOST_ASSERT(layer.GetBackendId() != Compute::Undefined);
272  // Does not need another compatibility layer if a copy or import layer is already present.
273  return layer.GetType() != LayerType::MemCopy &&
274  layer.GetType() != LayerType::MemImport;
275  };
276 
277  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
278  {
279  return strategy == EdgeStrategy::CopyToTarget ||
280  strategy == EdgeStrategy::ExportToTarget;
281  };
282 
283  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
284  {
285  BOOST_ASSERT(srcLayer);
286 
287  if (!MayNeedCompatibilityLayer(*srcLayer))
288  {
289  // The current layer does not need copy layers, move to the next one
290  return;
291  }
292 
293  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
294  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
295  {
296  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
297  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
298  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
299  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
300  {
301  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
302  BOOST_ASSERT(dstInputSlot);
303 
304  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
305  BOOST_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
306  "Undefined memory strategy found while adding copy layers for compatibility");
307 
308  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
309  if (MayNeedCompatibilityLayer(dstLayer) &&
310  IsCompatibilityStrategy(strategy))
311  {
312  // A copy layer is needed in between the source and destination layers.
313  // Record the operation rather than attempting to modify the graph as we go.
314  // (invalidating iterators)
315  const std::string compLayerName = boost::str(boost::format("[ %1% (%2%) -> %3% (%4%) ]")
316  % srcLayer->GetName()
317  % srcOutputIndex
318  % dstLayer.GetName()
319  % dstInputSlot->GetSlotIndex());
320 
321  Layer* compLayer = nullptr;
322  if (strategy == EdgeStrategy::CopyToTarget)
323  {
324  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
325  }
326  else
327  {
328  BOOST_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
329  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
330  }
331 
332  compLayer->SetBackendId(dstLayer.GetBackendId());
333 
334  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
335  auto backendIt = backends.find(dstLayer.GetBackendId());
336  if (backendIt != backends.end() &&
337  backendIt->second &&
338  backendIt->second->SupportsTensorAllocatorAPI())
339  {
340  auto backend = backendIt->second.get();
341  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
342  bool found = false;
343 
344  for (auto preference : tensorHandleFactoryIds)
345  {
346  auto factory = registry.GetFactory(preference);
347  if (factory)
348  {
349  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
350  auto srcFactory = registry.GetFactory(srcPref);
351 
352  if (srcFactory)
353  {
354  bool canExportImport =
355  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
356 
357  if (factory->SupportsMapUnmap() || canExportImport)
358  {
359  compOutputSlot.SetTensorHandleFactory(preference);
360  found = true;
361  break;
362  }
363  }
364  }
365  }
366 
367  if (!found)
368  {
369  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
370  }
371  }
372  else
373  {
374  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
375  }
376 
377  // The output strategy of a compatibility layer is always DirectCompatibility.
378  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
379 
380  // Recalculate the connection index on the previous layer as we have just inserted into it.
381  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
382  long newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
383  std::find(newSourceConnections.begin(),
384  newSourceConnections.end(),
385  &compLayer->GetInputSlot(0)));
386 
387  // The input strategy of a compatibility layer is always DirectCompatibility.
388  srcOutputSlot.SetEdgeStrategy(boost::numeric_cast<unsigned int>(newSrcConnectionIndex),
389  EdgeStrategy::DirectCompatibility);
390  }
391  }
392  }
393  });
394 }

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline
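
A minimal usage sketch (not part of the generated documentation): the header paths, the hypothetical BuildTrivialGraph helper, and the InputLayer/OutputLayer constructor arguments (a binding id and a name) are assumptions, so treat them as illustrative.

    // Sketch only: header paths and layer constructor arguments are assumptions.
    #include "Graph.hpp"
    #include "layers/InputLayer.hpp"
    #include "layers/OutputLayer.hpp"

    void BuildTrivialGraph()
    {
        armnn::Graph graph;

        // AddLayer forwards its arguments to the LayerT constructor and returns a typed pointer.
        armnn::InputLayer*  input  = graph.AddLayer<armnn::InputLayer>(0, "input");
        armnn::OutputLayer* output = graph.AddLayer<armnn::OutputLayer>(0, "output");

        // Connect the input layer's single output slot to the output layer.
        input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    }
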

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handlers of each layer.

Definition at line 142 of file Graph.cpp.

References ITensorHandle::Allocate(), armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), and armnn::Success.

Referenced by Graph::GetNumLayers().

143 {
144  // Layers must be sorted in topological order
145  BOOST_ASSERT(m_LayersInOrder);
146 
147  std::unordered_set<const ITensorHandle*> preallocatedTensors;
148  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
149 
150  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
151  // is a TensorHandle, the function just returns it
152  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
153  {
154  ITensorHandle* ancestor = subTensorHandle;
155  while (ancestor && ancestor->GetParent())
156  {
157  ancestor = ancestor->GetParent();
158  }
159  return ancestor;
160  };
161 
162  // Checks whether a TensorHandle has been pre-allocated
163  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
164  {
165  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
166  };
167 
168  // Constant tensor handles need to last from the beginning of execution till the end,
169  // therefore we pre-allocate them upfront
170  for (auto&& layer : m_Layers)
171  {
172  if (layer->GetType() == LayerType::Constant)
173  {
174  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
175  {
176  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
177 
178  if (tensorHandle && !IsPreallocated(tensorHandle))
179  {
180  tensorHandle->Allocate();
181  preallocatedTensors.insert(tensorHandle);
182  }
183  }
184  }
185  }
186 
187  // Iterate over the network in topological order
188  for (auto&& layer : m_Layers)
189  {
190  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
191  // The first time we encounter a new tensor handle, we start managing its lifetime.
192  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
193  {
194  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
195 
196  if (tensorHandle && !IsPreallocated(tensorHandle))
197  {
198  unsigned int numConnections = slot->GetNumConnections();
199  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
200  {
201  handleReferenceCounts[tensorHandle] = numConnections;
202  tensorHandle->Manage();
203  if (handleReferenceCounts[tensorHandle] == 0u)
204  {
205  // if nobody consumes this tensor we call Allocate()
206  tensorHandle->Allocate();
207  }
208  }
209  else
210  {
211  handleReferenceCounts[tensorHandle] += numConnections;
212  }
213  }
214  }
215 
216  // Loop through the input slots in the same layer and decrement the reference counter associated
217  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
218  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
219  {
220  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
221  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
222 
223  if (tensorHandle && !IsPreallocated(tensorHandle))
224  {
225  --handleReferenceCounts[tensorHandle];
226 
227  if (handleReferenceCounts[tensorHandle] == 0u)
228  {
229  // Stop managing lifetime of tensor handle
230  tensorHandle->Allocate();
231  handleReferenceCounts.erase(tensorHandle);
232  }
233  }
234  }
235  }
236 
237  return Status::Success;
238 }
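
The assertion at the top of the listing requires the layer list to already be in topological order, so a typical call site sorts first. A brief sketch, assuming the output tensor handles have already been created; the PrepareBuffers helper is hypothetical.

    // Sketch: sort, then let the graph manage/allocate intermediate tensor memory.
    armnn::Status PrepareBuffers(armnn::Graph& graph)
    {
        graph.TopologicalSort();               // AllocateDynamicBuffers() asserts m_LayersInOrder
        return graph.AllocateDynamicBuffers(); // returns Status::Success on completion
    }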

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 204 of file Graph.hpp.

Referenced by GraphObservable< Layer *>::GraphObservable().

204  {
205  m_Views[notifyOnEvent].emplace_back(observable);
206  }

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 158 of file Graph.hpp.

Referenced by BOOST_AUTO_TEST_CASE(), and Optimizer::Pass().

158 { return m_Layers.begin(); }

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 163 of file Graph.hpp.

163 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 168 of file Graph.hpp.

References Graph::InputLayersAccessor::begin().

Referenced by BOOST_AUTO_TEST_CASE().

168 { return begin(); }

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 170 of file Graph.hpp.

References Graph::InputLayersAccessor::end().

Referenced by BOOST_AUTO_TEST_CASE().

170 { return end(); }

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 208 of file Graph.hpp.

References Graph::GetPosInGraph(), armnn::IgnoreUnused(), armnn::Input, Graph::InputLayersAccessor::m_Graph, armnn::Output, and armnnUtils::Filesystem::Remove().

Referenced by GraphObservable< Layer *>::~GraphObservable().

208  {
209  m_Views[notifyOnEvent].remove(observable);
210  }

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 160 of file Graph.hpp.

Referenced by Optimizer::Pass().

160 { return m_Layers.end(); }

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 165 of file Graph.hpp.

165 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

Deletes the layer at the specified position.

Definition at line 442 of file Graph.hpp.

References armnn::LayerErased.

Referenced by BOOST_AUTO_TEST_CASE(), Graph::EraseLayer(), DynamicQuantizationVisitor::FinishVisit(), Optimizer::Pass(), OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), Graph::SubstituteSubgraph(), and Graph::~Graph().

443 {
444  NotifyObservables(GraphEvent::LayerErased, *pos);
445 
446  delete *pos;
447 }

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 450 of file Graph.hpp.

References Graph::EraseLayer(), and Graph::GetPosInGraph().

451 {
452  BOOST_ASSERT(layer != nullptr);
453  EraseLayer(GetPosInGraph(*layer));
454  layer = nullptr;
455 }
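
A short sketch of the templated overload; the RemoveLayer helper is hypothetical and not part of ArmNN.

    #include <cassert>

    // Sketch: erase a layer through a pointer; the overload nulls the caller's pointer.
    void RemoveLayer(armnn::Graph& graph, armnn::Layer*& layer)
    {
        graph.EraseLayer(layer);   // notifies observers, removes and deletes the layer
        assert(layer == nullptr);
    }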

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 39 of file Graph.hpp.

Referenced by Graph::AddCompatibilityLayers(), BOOST_AUTO_TEST_CASE(), Graph::operator=(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().

40  {
41  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
42  {
43  auto next = std::next(it);
44  func(*it);
45  it = next;
46  }
47  }
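
Because the next iterator is captured before the functor runs, the functor may safely erase the layer it is handed. A sketch that only reads from each layer; the CountLayersPerBackend helper is illustrative.

    #include <map>

    // Sketch: tally how many layers are assigned to each backend.
    std::map<armnn::BackendId, size_t> CountLayersPerBackend(const armnn::Graph& graph)
    {
        std::map<armnn::BackendId, size_t> counts;
        graph.ForEachLayer([&counts](armnn::Layer* layer)
        {
            ++counts[layer->GetBackendId()];
        });
        return counts;
    }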

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 181 of file Graph.hpp.

References Graph::InputLayersAccessor::InputLayersAccessor().

Referenced by armnn::BOOST_AUTO_TEST_CASE(), LoadedNetwork::EnqueueWorkload(), armnn::GetInputTensorInfo(), and NetworkQuantizer::OverrideInputRange().

181 { return InputLayersAccessor(*this); }
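
A sketch of the intended range-based usage, assuming the ConstIteratorInputs typedef yields const InputLayer* pointers; the PrintInputLayers helper is illustrative.

    #include <iostream>

    // Sketch: list the graph's input layers by name.
    void PrintInputLayers(const armnn::Graph& graph)
    {
        for (const armnn::InputLayer* inputLayer : graph.GetInputLayers())
        {
            std::cout << "Input: " << inputLayer->GetName() << "\n";
        }
    }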

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 176 of file Graph.hpp.

Referenced by Graph::InputLayersAccessor::end(), and LoadedNetwork::EnqueueWorkload().

176 { return m_InputIds.size(); }

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 177 of file Graph.hpp.

Referenced by Graph::OutputLayersAccessor::begin(), and LoadedNetwork::EnqueueWorkload().

177 { return m_OutputIds.size(); }

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 185 of file Graph.hpp.

Referenced by LoadedNetwork::EnqueueWorkload().

185 { return OutputLayersAccessor(*this); }

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer &  layer)
inline

Gets the position of a layer in the graph.

Definition at line 389 of file Graph.hpp.

Referenced by Graph::DetachObservable(), Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

390 {
391  auto it = m_PosInGraphMap.find(&layer);
392  BOOST_ASSERT(it != m_PosInGraphMap.end());
393  return it->second;
394 }

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 493 of file Graph.cpp.

References armnn::GetLayerTypeAsCString(), IOutputSlot::IsTensorInfoSet(), and Graph::TopologicalSort().

Referenced by BOOST_AUTO_TEST_CASE(), Graph::GetNumLayers(), PreluValidateTensorShapesFromInputsMatchTest(), PreluValidateTensorShapesFromInputsNoMatchTest(), StackValidateTensorShapesFromInputsMatchTest(), and StackValidateTensorShapesFromInputsNoMatchTest().

494 {
495  for (auto&& layer : TopologicalSort())
496  {
497  for (auto&& input : layer->GetInputSlots())
498  {
499  const IOutputSlot* source = input.GetConnectedOutputSlot();
500  if (source == NULL)
501  {
502  std::ostringstream message;
503  message << "Input not connected on "
504  << GetLayerTypeAsCString(layer->GetType())
505  << " layer \""
506  << layer->GetName()
507  << "\"";
508  throw LayerValidationException(message.str());
509  }
510 
511  if (!source->IsTensorInfoSet())
512  {
513  throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
514  }
515  }
516  layer->ValidateTensorShapesFromInputs();
517  }
518 }
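
A sketch of the usual call order, assuming the caller still holds the non-const pointer returned by AddLayer for the input layer; the FinaliseShapes helper and the tensor shape shown are illustrative.

    // Sketch: seed the input shape, then derive/validate the remaining shapes.
    void FinaliseShapes(armnn::Graph& graph, armnn::InputLayer* input)
    {
        input->GetOutputSlot(0).SetTensorInfo(
            armnn::TensorInfo({ 1, 3, 224, 224 }, armnn::DataType::Float32));
        graph.InferTensorInfos(); // throws LayerValidationException on unconnected inputs
    }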

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot &  insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 409 of file Graph.hpp.

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by BOOST_AUTO_TEST_CASE(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), FoldPadIntoConvolution2dImpl::Run(), MoveTransposeUpImpl::Run(), MovePermuteUpImpl::Run(), and Graph::~Graph().

410 {
411  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
412  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
413  const Iterator pos = (parentOut != nullptr)
414  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
415  : GetPosInGraph(insertBefore.GetOwningLayer());
416  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
417  insertBefore.Insert(*layer);
418 
419  NotifyObservables(GraphEvent::LayerAdded, layer);
420 
421  return layer;
422 }

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot &  insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 425 of file Graph.hpp.

References OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

426 {
427  Layer& owningLayer = insertAfter.GetOwningLayer();
428 
429  const Iterator pos = std::next(GetPosInGraph(owningLayer));
430  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
431 
432  BOOST_ASSERT(layer->GetNumInputSlots() == 1);
433 
434  insertAfter.MoveAllConnections(layer->GetOutputSlot());
435  insertAfter.Connect(layer->GetInputSlot(0));
436 
437  NotifyObservables(GraphEvent::LayerAdded, layer);
438 
439  return layer;
440 }

◆ operator=() [1/2]

Graph& operator= ( const Graph &  other)
delete

Referenced by Graph::Graph().

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 106 of file Graph.hpp.

References Graph::ForEachLayer(), and Layer::Reparent().

107  {
108  m_InputIds = std::move(other.m_InputIds);
109  m_OutputIds = std::move(other.m_OutputIds);
110  m_LayersInOrder = std::move(other.m_LayersInOrder);
111  m_Views = std::move(other.m_Views);
112 
113  other.ForEachLayer([this](Layer* otherLayer)
114  {
115  otherLayer->Reparent(*this, m_Layers.end());
116  });
117 
118  BOOST_ASSERT(other.m_PosInGraphMap.empty());
119  BOOST_ASSERT(other.m_Layers.empty());
120 
121  return *this;
122  }

◆ Print()

Status Print ( ) const

Definition at line 61 of file Graph.cpp.

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder(), and Graph::~Graph().

62 {
63  if (m_Layers.empty())
64  {
65  ARMNN_LOG(info) << "\n Graph is empty.\n";
66  return Status::Success;
67  }
68  ARMNN_LOG(info) << "\n";
69  ARMNN_LOG(info) << "Walking Pattern: \n";
70 
71  for (auto&& it : TopologicalSort())
72  {
73  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
74  << ":" << it->GetBackendId().Get();
75  }
76  ARMNN_LOG(info) << "\n\n";
77 
78  return Status::Success;
79 }

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 33 of file Graph.hpp.

34  {
35  return boost::polymorphic_downcast<LayerType*>(layer);
36  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 81 of file Graph.cpp.

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.

Referenced by BOOST_AUTO_TEST_CASE(), and Graph::~Graph().

82 {
83  {
84  DotGraph graph(stream, "Optimized");
85 
86  {
87  // Default node attributes:
88  DotDefaults nodes(stream, "node");
89  nodes.GetAttributeSet()
90  .AddAttribute("shape", "record");
91  }
92 
93  {
94  // Default edge attributes:
95  DotDefaults edges(stream, "edge");
96  edges.GetAttributeSet()
97  .AddAttribute("fontsize", 8)
98  .AddAttribute("fontcolor", "blue")
99  .AddAttribute("fontname", "arial-bold");
100  }
101 
102  // First declares the nodes.
103  for (auto&& layer : m_Layers)
104  {
105  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
106  // Extracts the layer parameters.
107  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
108  node.GetContents().AddContent(name + " : " + value);
109  };
110  layer->SerializeLayerParameters(extractParams);
111  }
112 
113  // Second declares the edges.
114  for (auto&& layer : m_Layers)
115  {
116  LayerGuid toId = layer->GetGuid();
117 
118  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
119  {
120  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
121  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
122  DotEdge edge(stream, fromId, toId);
123 
124  // Now print the tensor shape on the edge.
125  {
126  // Constructs the label attribute with HTML markup.
127  std::stringstream ss;
128  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
129  edge.GetAttributeSet().AddAttribute("label", ss);
130  }
131  }
132  }
133  }
134 
135  if (stream.bad())
136  {
137  return Status::Failure;
138  }
139  return Status::Success;
140 }
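
A sketch that writes the dot output to a file; any std::ostream works. The DumpGraph helper is illustrative.

    #include <fstream>
    #include <string>

    // Sketch: dump the optimized graph in GraphViz dot format.
    armnn::Status DumpGraph(armnn::Graph& graph, const std::string& path)
    {
        std::ofstream file(path);
        return graph.SerializeToDot(file); // Status::Failure if the stream went bad
    }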

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
IConnectableLayer *  substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 396 of file Graph.cpp.

Referenced by armnn::ApplyBackendOptimizations(), BOOST_AUTO_TEST_CASE(), and Graph::GetNumLayers().

397 {
398  BOOST_ASSERT(substituteLayer != nullptr);
399 
400  ReplaceSubgraphConnections(subgraph, substituteLayer);
401  EraseSubgraphLayers(subgraph);
402 }

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
const SubgraphView &  substituteSubgraph 
)

Definition at line 404 of file Graph.cpp.

References SubgraphView::Clear(), IOutputSlot::Connect(), IOutputSlot::Disconnect(), Graph::EraseLayer(), SubgraphView::ForEachLayer(), InputSlot::GetConnection(), SubgraphView::GetInputSlots(), SubgraphView::GetLayers(), SubgraphView::GetOutputSlots(), armnn::IgnoreUnused(), OutputSlot::MoveAllConnections(), armnn::numeric_cast(), Layer::Reparent(), and Graph::TopologicalSort().

405 {
406  // Look through each layer in the new subgraph and add any that are not already a member of this graph
407  substituteSubgraph.ForEachLayer([this](Layer* layer)
408  {
409  if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
410  {
411  layer->Reparent(*this, m_Layers.end());
412  m_LayersInOrder = false;
413  }
414  });
415 
416  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
417  EraseSubgraphLayers(subgraph);
418  TopologicalSort();
419 }

◆ TopologicalSort() [1/2]

Graph& TopologicalSort ( )
inline

Sorts layers in topological order and returns this.

Definition at line 173 of file Graph.hpp.

References Graph::TopologicalSort().

Referenced by BOOST_AUTO_TEST_CASE(), CheckOrder(), NetworkQuantizer::ExportNetwork(), Graph::InferTensorInfos(), LoadedNetwork::MakeLoadedNetwork(), Optimizer::Pass(), Graph::Print(), NetworkQuantizer::Refine(), Graph::SubstituteSubgraph(), and Graph::TopologicalSort().

173 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }

◆ TopologicalSort() [2/2]

const Graph & TopologicalSort ( ) const

Definition at line 240 of file Graph.cpp.

241 {
242  if (!m_LayersInOrder)
243  {
244  // Resets layer order.
245  for (auto&& it : m_Layers)
246  {
247  it->ResetPriority();
248  }
249 
250  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
251  {
252  return layerA->GetPriority() < layerB->GetPriority();
253  };
254 
255  m_Layers.sort(compareLayerPriority);
256 
257  m_LayersInOrder = true;
258  }
259 
260  return *this;
261 }
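
A sketch of iterating in dependency order, the same pattern Print() and InferTensorInfos() use; the WalkInOrder helper is illustrative.

    #include <iostream>

    // Sketch: visit every layer after sorting; TopologicalSort() returns *this.
    void WalkInOrder(armnn::Graph& graph)
    {
        for (armnn::Layer* layer : graph.TopologicalSort())
        {
            std::cout << layer->GetName() << "\n";
        }
    }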

The documentation for this class was generated from the following files:
Graph.hpp
Graph.cpp