ArmNN
 20.05
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = boost::transform_iterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = boost::transform_iterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = boost::transform_iterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph ()
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print () const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and return this. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Detailed Description

Definition at line 30 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = boost::transform_iterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 54 of file Graph.hpp.

◆ ConstIteratorInputs

using ConstIteratorInputs = boost::transform_iterator<decltype(&PtrCast<const InputLayer>), Iterator>

Definition at line 55 of file Graph.hpp.

◆ ConstIteratorOutputs

using ConstIteratorOutputs = boost::transform_iterator<decltype(&PtrCast<const OutputLayer>), Iterator>

Definition at line 56 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 51 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 52 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( )
inline

Definition at line 96 of file Graph.hpp.

References Graph::operator=().

96 : m_LayersInOrder(true) {}

◆ Graph() [2/3]

Graph ( const Graph &  other)

Definition at line 27 of file Graph.cpp.

References Layer::BeginOutputSlots(), Layer::Clone(), and Layer::GetInputSlot().

28 : m_LayersInOrder(other.m_LayersInOrder)
29 {
30  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
31 
32  for (auto&& otherLayer : other.m_Layers)
33  {
34  Layer* const layer = otherLayer->Clone(*this);
35  otherToClonedMap.emplace(otherLayer, layer);
36  }
37 
38  // Copies slot connections.
39  for (auto&& otherLayer : other.m_Layers)
40  {
41  Layer* const thisLayer = otherToClonedMap[otherLayer];
42 
43  auto outputSlot = thisLayer->BeginOutputSlots();
44  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
45  {
46  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
47  {
48  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
49  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
50 
51  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
52  outputSlot->Connect(inputSlot);
53  }
54  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
55  ++outputSlot;
56  }
57  }
58 }

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 102 of file Graph.hpp.

103  {
104  *this = std::move(other);
105  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 125 of file Graph.hpp.

References Graph::AddLayer(), Graph::EraseLayer(), Graph::ForEachLayer(), Graph::InsertNewLayer(), Graph::Print(), and Graph::SerializeToDot().

126  {
127  ForEachLayer([](Layer* layer)
128  {
129  delete layer;
130  });
131  }
void ForEachLayer(Func func) const
Definition: Graph.hpp:40

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers.

Definition at line 262 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), and Graph::GetNumLayers().

264 {
265  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
266  // connections to other layers).
267  auto MayNeedCompatibilityLayer = [](const Layer& layer)
268  {
269  // All layers should have been associated with a valid compute device at this point.
270  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
271  // Does not need another compatibility layer if a copy or import layer is already present.
272  return layer.GetType() != LayerType::MemCopy &&
273  layer.GetType() != LayerType::MemImport;
274  };
275 
276  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
277  {
278  return strategy == EdgeStrategy::CopyToTarget ||
279  strategy == EdgeStrategy::ExportToTarget;
280  };
281 
282  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
283  {
284  ARMNN_ASSERT(srcLayer);
285 
286  if (!MayNeedCompatibilityLayer(*srcLayer))
287  {
288  // The current layer does not need copy layers, move to the next one
289  return;
290  }
291 
292  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
293  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
294  {
295  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
296  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
297  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
298  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
299  {
300  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
301  ARMNN_ASSERT(dstInputSlot);
302 
 303  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
 304  ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
 305  "Undefined memory strategy found while adding copy layers for compatibility");
306 
307  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
308  if (MayNeedCompatibilityLayer(dstLayer) &&
309  IsCompatibilityStrategy(strategy))
310  {
311  // A copy layer is needed in between the source and destination layers.
312  // Record the operation rather than attempting to modify the graph as we go.
313  // (invalidating iterators)
314  const std::string compLayerName = boost::str(boost::format("[ %1% (%2%) -> %3% (%4%) ]")
315  % srcLayer->GetName()
316  % srcOutputIndex
317  % dstLayer.GetName()
318  % dstInputSlot->GetSlotIndex());
319 
320  Layer* compLayer = nullptr;
321  if (strategy == EdgeStrategy::CopyToTarget)
322  {
323  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
324  }
325  else
326  {
327  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
328  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
329  }
330 
331  compLayer->SetBackendId(dstLayer.GetBackendId());
332 
333  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
334  auto backendIt = backends.find(dstLayer.GetBackendId());
335  if (backendIt != backends.end() &&
336  backendIt->second &&
337  backendIt->second->SupportsTensorAllocatorAPI())
338  {
339  auto backend = backendIt->second.get();
340  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
341  bool found = false;
342 
343  for (auto preference : tensorHandleFactoryIds)
344  {
345  auto factory = registry.GetFactory(preference);
346  if (factory)
347  {
348  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
349  auto srcFactory = registry.GetFactory(srcPref);
350 
351  if (srcFactory)
352  {
353  bool canExportImport =
354  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
355 
356  if (factory->SupportsMapUnmap() || canExportImport)
357  {
358  compOutputSlot.SetTensorHandleFactory(preference);
359  found = true;
360  break;
361  }
362  }
363  }
364  }
365 
366  if (!found)
367  {
368  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
369  }
370  }
371  else
372  {
373  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
374  }
375 
376  // The output strategy of a compatibility layer is always DirectCompatibility.
377  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
378 
379  // Recalculate the connection index on the previous layer as we have just inserted into it.
380  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
381  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
382  std::find(newSourceConnections.begin(),
383  newSourceConnections.end(),
384  &compLayer->GetInputSlot(0)));
385 
386  // The input strategy of a compatibility layer is always DirectCompatibilty.
 387  srcOutputSlot.SetEdgeStrategy(boost::numeric_cast<unsigned int>(newSrcConnectionIndex),
 388  EdgeStrategy::DirectCompatibility);
 389  }
390  }
391  }
392  });
393 }
No strategy has been defined. Used internally to verify integrity of optimizations.
Source backends tensor data can be exported to destination backend tensor without copy...
Destination backend can work directly with tensors on source backend.
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
static const FactoryId LegacyFactoryId

◆ AddLayer()

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handlers of each layer.

Definition at line 141 of file Graph.cpp.

References ITensorHandle::Allocate(), ARMNN_ASSERT, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), and armnn::Success.

Referenced by Graph::GetNumLayers().

142 {
143  // Layers must be sorted in topological order
144  ARMNN_ASSERT(m_LayersInOrder);
145 
146  std::unordered_set<const ITensorHandle*> preallocatedTensors;
147  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
148 
149  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
150  // is a TensorHandle, the function just returns it
151  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
152  {
153  ITensorHandle* ancestor = subTensorHandle;
154  while (ancestor && ancestor->GetParent())
155  {
156  ancestor = ancestor->GetParent();
157  }
158  return ancestor;
159  };
160 
161  // Checks whether a TensorHandle has been pre-allocated
162  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
163  {
164  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
165  };
166 
167  // Constant tensor handles need to last from the beginning of execution till the end,
168  // therefore we pre-allocate them upfront
169  for (auto&& layer : m_Layers)
170  {
171  if (layer->GetType() == LayerType::Constant)
172  {
173  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
174  {
175  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
176 
177  if (tensorHandle && !IsPreallocated(tensorHandle))
178  {
179  tensorHandle->Allocate();
180  preallocatedTensors.insert(tensorHandle);
181  }
182  }
183  }
184  }
185 
186  // Iterate over the network in topological order
187  for (auto&& layer : m_Layers)
188  {
189  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
190  // The first time we encounter a new tensor handle, we start managing its lifetime.
191  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
192  {
193  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
194 
195  if (tensorHandle && !IsPreallocated(tensorHandle))
196  {
197  unsigned int numConnections = slot->GetNumConnections();
198  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
199  {
200  handleReferenceCounts[tensorHandle] = numConnections;
201  tensorHandle->Manage();
202  if (handleReferenceCounts[tensorHandle] == 0u)
203  {
204  // if nobody consumes this tensor we call Allocate()
205  tensorHandle->Allocate();
206  }
207  }
208  else
209  {
210  handleReferenceCounts[tensorHandle] += numConnections;
211  }
212  }
213  }
214 
215  // Loop through the input slots in the same layer and decrement the reference counter associated
216  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
217  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
218  {
219  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
220  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
221 
222  if (tensorHandle && !IsPreallocated(tensorHandle))
223  {
224  --handleReferenceCounts[tensorHandle];
225 
226  if (handleReferenceCounts[tensorHandle] == 0u)
227  {
228  // Stop managing lifetime of tensor handle
229  tensorHandle->Allocate();
230  handleReferenceCounts.erase(tensorHandle);
231  }
232  }
233  }
234  }
235 
236  return Status::Success;
237 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 205 of file Graph.hpp.

Referenced by GraphObservable< Layer *>::GraphObservable().

205  {
206  m_Views[notifyOnEvent].emplace_back(observable);
207  }

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 159 of file Graph.hpp.

Referenced by BOOST_AUTO_TEST_CASE(), and Optimizer::Pass().

159 { return m_Layers.begin(); }

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 164 of file Graph.hpp.

164 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 169 of file Graph.hpp.

References Graph::InputLayersAccessor::begin().

Referenced by BOOST_AUTO_TEST_CASE().

169 { return begin(); }
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:159

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 171 of file Graph.hpp.

References Graph::InputLayersAccessor::end().

Referenced by BOOST_AUTO_TEST_CASE().

171 { return end(); }
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:161

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 209 of file Graph.hpp.

References ARMNN_ASSERT, Graph::GetPosInGraph(), armnn::IgnoreUnused(), armnn::Input, Graph::InputLayersAccessor::m_Graph, armnn::Output, and armnnUtils::Filesystem::Remove().

Referenced by GraphObservable< Layer *>::~GraphObservable().

209  {
210  m_Views[notifyOnEvent].remove(observable);
211  }

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 161 of file Graph.hpp.

Referenced by Optimizer::Pass().

161 { return m_Layers.end(); }

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 166 of file Graph.hpp.

166 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

Deletes the layer at the specified position.

Definition at line 443 of file Graph.hpp.

References armnn::LayerErased.

Referenced by BOOST_AUTO_TEST_CASE(), Graph::EraseLayer(), DynamicQuantizationVisitor::FinishVisit(), Optimizer::Pass(), OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), Graph::SubstituteSubgraph(), and Graph::~Graph().

444 {
445  NotifyObservables(GraphEvent::LayerErased, *pos);
446 
447  delete *pos;
448 }

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 451 of file Graph.hpp.

References ARMNN_ASSERT, Graph::EraseLayer(), and Graph::GetPosInGraph().

452 {
453  ARMNN_ASSERT(layer != nullptr);
454  EraseLayer(GetPosInGraph(*layer));
455  layer = nullptr;
456 }
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:443
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:390
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 40 of file Graph.hpp.

Referenced by Graph::AddCompatibilityLayers(), BOOST_AUTO_TEST_CASE(), Graph::operator=(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().

41  {
42  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43  {
44  auto next = std::next(it);
45  func(*it);
46  it = next;
47  }
48  }

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 182 of file Graph.hpp.

References Graph::InputLayersAccessor::InputLayersAccessor().

Referenced by armnn::BOOST_AUTO_TEST_CASE(), LoadedNetwork::EnqueueWorkload(), armnn::GetInputTensorInfo(), and NetworkQuantizer::OverrideInputRange().

182 { return InputLayersAccessor(*this); }

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 177 of file Graph.hpp.

Referenced by Graph::InputLayersAccessor::end(), and LoadedNetwork::EnqueueWorkload().

177 { return m_InputIds.size(); }

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 178 of file Graph.hpp.

Referenced by Graph::OutputLayersAccessor::begin(), and LoadedNetwork::EnqueueWorkload().

178 { return m_OutputIds.size(); }

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 186 of file Graph.hpp.

Referenced by LoadedNetwork::EnqueueWorkload().

186 { return OutputLayersAccessor(*this); }

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer &  layer)
inline

Gets the position of a layer in the graph.

Definition at line 390 of file Graph.hpp.

References ARMNN_ASSERT.

Referenced by Graph::DetachObservable(), Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

391 {
392  auto it = m_PosInGraphMap.find(&layer);
393  ARMNN_ASSERT(it != m_PosInGraphMap.end());
394  return it->second;
395 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 492 of file Graph.cpp.

References armnn::GetLayerTypeAsCString(), IOutputSlot::IsTensorInfoSet(), and Graph::TopologicalSort().

Referenced by BOOST_AUTO_TEST_CASE(), Graph::GetNumLayers(), PreluValidateTensorShapesFromInputsMatchTest(), PreluValidateTensorShapesFromInputsNoMatchTest(), StackValidateTensorShapesFromInputsMatchTest(), and StackValidateTensorShapesFromInputsNoMatchTest().

493 {
494  for (auto&& layer : TopologicalSort())
495  {
496  for (auto&& input : layer->GetInputSlots())
497  {
498  const IOutputSlot* source = input.GetConnectedOutputSlot();
499  if (source == NULL)
500  {
501  std::ostringstream message;
502  message << "Input not connected on "
503  << GetLayerTypeAsCString(layer->GetType())
504  << " layer \""
505  << layer->GetName()
506  << "\"";
507  throw LayerValidationException(message.str());
508  }
509 
510  if (!source->IsTensorInfoSet())
511  {
512  throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
513  }
514  }
515  layer->ValidateTensorShapesFromInputs();
516  }
517 }
char const * GetLayerTypeAsCString(LayerType type)
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:174

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot &  insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 410 of file Graph.hpp.

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by BOOST_AUTO_TEST_CASE(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), FoldPadIntoConvolution2dImpl::Run(), MoveTransposeUpImpl::Run(), MovePermuteUpImpl::Run(), and Graph::~Graph().

411 {
412  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
413  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
414  const Iterator pos = (parentOut != nullptr)
415  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
416  : GetPosInGraph(insertBefore.GetOwningLayer());
417  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
418  insertBefore.Insert(*layer);
419 
420  NotifyObservables(GraphEvent::LayerAdded, layer);
421 
422  return layer;
423 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:51
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:390

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot &  insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 426 of file Graph.hpp.

References ARMNN_ASSERT, OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

427 {
428  Layer& owningLayer = insertAfter.GetOwningLayer();
429 
430  const Iterator pos = std::next(GetPosInGraph(owningLayer));
431  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
432 
433  ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
434 
435  insertAfter.MoveAllConnections(layer->GetOutputSlot());
436  insertAfter.Connect(layer->GetInputSlot(0));
437 
438  NotifyObservables(GraphEvent::LayerAdded, layer);
439 
440  return layer;
441 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:51
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:390
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ operator=() [1/2]

Graph& operator= ( const Graph &  other)
delete

Referenced by Graph::Graph().

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 107 of file Graph.hpp.

References ARMNN_ASSERT, Graph::ForEachLayer(), and Layer::Reparent().

108  {
109  m_InputIds = std::move(other.m_InputIds);
110  m_OutputIds = std::move(other.m_OutputIds);
111  m_LayersInOrder = std::move(other.m_LayersInOrder);
112  m_Views = std::move(other.m_Views);
113 
114  other.ForEachLayer([this](Layer* otherLayer)
115  {
116  otherLayer->Reparent(*this, m_Layers.end());
117  });
118 
119  ARMNN_ASSERT(other.m_PosInGraphMap.empty());
120  ARMNN_ASSERT(other.m_Layers.empty());
121 
122  return *this;
123  }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ Print()

Status Print ( ) const

Definition at line 60 of file Graph.cpp.

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder(), and Graph::~Graph().

61 {
62  if (m_Layers.empty())
63  {
64  ARMNN_LOG(info) << "\n Graph is empty.\n";
65  return Status::Success;
66  }
67  ARMNN_LOG(info) << "\n";
68  ARMNN_LOG(info) << "Walking Pattern: \n";
69 
70  for (auto&& it : TopologicalSort())
71  {
72  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
73  << ":" << it->GetBackendId().Get();
74  }
75  ARMNN_LOG(info) << "\n\n";
76 
77  return Status::Success;
78 }
#define ARMNN_LOG(severity)
Definition: Logging.hpp:163
char const * GetLayerTypeAsCString(LayerType type)
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:174

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 34 of file Graph.hpp.

35  {
36  return PolymorphicDowncast<LayerType*>(layer);
37  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 80 of file Graph.cpp.

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.

Referenced by BOOST_AUTO_TEST_CASE(), and Graph::~Graph().

81 {
82  {
83  DotGraph graph(stream, "Optimized");
84 
85  {
86  // Default node attributes:
87  DotDefaults nodes(stream, "node");
88  nodes.GetAttributeSet()
89  .AddAttribute("shape", "record");
90  }
91 
92  {
93  // Default edge attributes:
94  DotDefaults edges(stream, "edge");
95  edges.GetAttributeSet()
96  .AddAttribute("fontsize", 8)
97  .AddAttribute("fontcolor", "blue")
98  .AddAttribute("fontname", "arial-bold");
99  }
100 
101  // First declares the nodes.
102  for (auto&& layer : m_Layers)
103  {
104  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
105  // Extracts the layer parameters.
106  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
107  node.GetContents().AddContent(name + " : " + value);
108  };
109  layer->SerializeLayerParameters(extractParams);
110  }
111 
112  // Second declares the edges.
113  for (auto&& layer : m_Layers)
114  {
115  LayerGuid toId = layer->GetGuid();
116 
117  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
118  {
119  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
120  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
121  DotEdge edge(stream, fromId, toId);
122 
123  // Now print the tensor shape on the edge.
124  {
125  // Constructs the label attribute with HTML markup.
126  std::stringstream ss;
127  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
128  edge.GetAttributeSet().AddAttribute("label", ss);
129  }
130  }
131  }
132  }
133 
134  if (stream.bad())
135  {
136  return Status::Failure;
137  }
138  return Status::Success;
139 }
char const * GetLayerTypeAsCString(LayerType type)
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:236
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
IConnectableLayer *  substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 395 of file Graph.cpp.

References ARMNN_ASSERT.

Referenced by armnn::ApplyBackendOptimizations(), BOOST_AUTO_TEST_CASE(), and Graph::GetNumLayers().

396 {
397  ARMNN_ASSERT(substituteLayer != nullptr);
398 
399  ReplaceSubgraphConnections(subgraph, substituteLayer);
400  EraseSubgraphLayers(subgraph);
401 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView &  subgraph,
const SubgraphView &  substituteSubgraph 
)

Definition at line 403 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, SubgraphView::Clear(), IOutputSlot::Connect(), IOutputSlot::Disconnect(), Graph::EraseLayer(), SubgraphView::ForEachLayer(), InputSlot::GetConnection(), SubgraphView::GetInputSlots(), SubgraphView::GetLayers(), SubgraphView::GetOutputSlots(), armnn::IgnoreUnused(), OutputSlot::MoveAllConnections(), armnn::numeric_cast(), Layer::Reparent(), and Graph::TopologicalSort().

404 {
405  // Look through each layer in the new subgraph and add any that are not already a member of this graph
406  substituteSubgraph.ForEachLayer([this](Layer* layer)
407  {
408  if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
409  {
410  layer->Reparent(*this, m_Layers.end());
411  m_LayersInOrder = false;
412  }
413  });
414 
415  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
416  EraseSubgraphLayers(subgraph);
417  TopologicalSort();
418 }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:174

◆ TopologicalSort() [1/2]

Graph& TopologicalSort ( )
inline

Sorts layers in topological order and return this.

Definition at line 174 of file Graph.hpp.

References Graph::TopologicalSort().

Referenced by BOOST_AUTO_TEST_CASE(), CheckOrder(), NetworkQuantizer::ExportNetwork(), Graph::InferTensorInfos(), LoadedNetwork::MakeLoadedNetwork(), Optimizer::Pass(), Graph::Print(), NetworkQuantizer::Refine(), LoadedNetwork::SendNetworkStructure(), Graph::SubstituteSubgraph(), and Graph::TopologicalSort().

174 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:174

◆ TopologicalSort() [2/2]

const Graph & TopologicalSort ( ) const

Definition at line 239 of file Graph.cpp.

240 {
241  if (!m_LayersInOrder)
242  {
243  // Resets layer order.
244  for (auto&& it : m_Layers)
245  {
246  it->ResetPriority();
247  }
248 
249  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
250  {
251  return layerA->GetPriority() < layerB->GetPriority();
252  };
253 
254  m_Layers.sort(compareLayerPriority);
255 
256  m_LayersInOrder = true;
257  }
258 
259  return *this;
260 }

The documentation for this class was generated from the following files: