ArmNN
 21.02
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = TransformIterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = TransformIterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = TransformIterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph (bool shapeInferenceMethod=false)
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print () const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and return this. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Detailed Description

Definition at line 29 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 53 of file Graph.hpp.

◆ ConstIteratorInputs

Definition at line 54 of file Graph.hpp.

◆ ConstIteratorOutputs

Definition at line 55 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 50 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 51 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 49 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool  shapeInferenceMethod = false)
inline

Definition at line 95 of file Graph.hpp.

References Graph::operator=().

96  : m_LayersInOrder(true)
97  , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
98                                                  ShapeInferenceMethod::ValidateOnly)
99  {}
Validate all output shapes.
Infer missing output shapes and validate all output shapes.

◆ Graph() [2/3]

Graph ( const Graph &  other)

Definition at line 28 of file Graph.cpp.

References Layer::BeginOutputSlots(), Layer::Clone(), and Layer::GetInputSlot().

29 : m_LayersInOrder(other.m_LayersInOrder)
30 {
31  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
32 
33  for (auto&& otherLayer : other.m_Layers)
34  {
35  Layer* const layer = otherLayer->Clone(*this);
36  otherToClonedMap.emplace(otherLayer, layer);
37  }
38 
39  // Copies slot connections.
40  for (auto&& otherLayer : other.m_Layers)
41  {
42  Layer* const thisLayer = otherToClonedMap[otherLayer];
43 
44  auto outputSlot = thisLayer->BeginOutputSlots();
45  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
46  {
47  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
48  {
49  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
50  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
51 
52  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
53  outputSlot->Connect(inputSlot);
54  }
55  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
56  ++outputSlot;
57  }
58  }
59 }

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 105 of file Graph.hpp.

106  {
107  *this = std::move(other);
108  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 128 of file Graph.hpp.

References Graph::AddLayer(), Graph::EraseLayer(), Graph::ForEachLayer(), Graph::InsertNewLayer(), Graph::Print(), and Graph::SerializeToDot().

129  {
130  ForEachLayer([](Layer* layer)
131  {
132  delete layer;
133  });
134  }
void ForEachLayer(Func func) const
Definition: Graph.hpp:39

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via intermediary copy layers.

Definition at line 300 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), Graph::GetNumLayers(), and armnn::Optimize().

302 {
303  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
304  // connections to other layers).
305  auto MayNeedCompatibilityLayer = [](const Layer& layer)
306  {
307  // All layers should have been associated with a valid compute device at this point.
308  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
309  // Does not need another compatibility layer if a copy or import layer is already present.
310  return layer.GetType() != LayerType::MemCopy &&
311  layer.GetType() != LayerType::MemImport;
312  };
313 
314  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
315  {
316  return strategy == EdgeStrategy::CopyToTarget ||
317  strategy == EdgeStrategy::ExportToTarget;
318  };
319 
320  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
321  {
322  ARMNN_ASSERT(srcLayer);
323 
324  if (!MayNeedCompatibilityLayer(*srcLayer))
325  {
326  // The current layer does not need copy layers, move to the next one
327  return;
328  }
329 
330  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
331  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
332  {
333  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
334  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
335  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
336  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
337  {
338  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
339  ARMNN_ASSERT(dstInputSlot);
340 
341  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
342  ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
343  "Undefined memory strategy found while adding copy layers for compatibility");
344 
345  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
346  if (MayNeedCompatibilityLayer(dstLayer) &&
347  IsCompatibilityStrategy(strategy))
348  {
349  // A copy layer is needed in between the source and destination layers.
350  // Record the operation rather than attempting to modify the graph as we go.
351  // (invalidating iterators)
352  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
353  srcLayer->GetName(),
354  srcOutputIndex,
355  dstLayer.GetName(),
356  dstInputSlot->GetSlotIndex());
357  Layer* compLayer = nullptr;
358  if (strategy == EdgeStrategy::CopyToTarget)
359  {
360  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
361  }
362  else
363  {
364  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
365  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
366  }
367 
368  compLayer->SetBackendId(dstLayer.GetBackendId());
369 
370  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
371  auto backendIt = backends.find(dstLayer.GetBackendId());
372  if (backendIt != backends.end() &&
373  backendIt->second &&
374  backendIt->second->SupportsTensorAllocatorAPI())
375  {
376  auto backend = backendIt->second.get();
377  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
378  bool found = false;
379 
380  for (auto preference : tensorHandleFactoryIds)
381  {
382  auto factory = registry.GetFactory(preference);
383  if (factory)
384  {
385  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
386  auto srcFactory = registry.GetFactory(srcPref);
387 
388  if (srcFactory)
389  {
390  bool canExportImport =
391  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
392 
393  if (factory->SupportsMapUnmap() || canExportImport)
394  {
395  compOutputSlot.SetTensorHandleFactory(preference);
396  found = true;
397  break;
398  }
399  }
400  }
401  }
402 
403  if (!found)
404  {
405  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
406  }
407  }
408  else
409  {
410  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
411  }
412 
413  // The output strategy of a compatibility layer is always DirectCompatibility.
414  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
415 
416  // Recalculate the connection index on the previous layer as we have just inserted into it.
417  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
418  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
419  std::find(newSourceConnections.begin(),
420  newSourceConnections.end(),
421  &compLayer->GetInputSlot(0)));
422 
423  // The input strategy of a compatibility layer is always DirectCompatibility.
424  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
425                                EdgeStrategy::DirectCompatibility);
426  }
427  }
428  }
429  });
430 }
No strategy has been defined. Used internally to verify integrity of optimizations.
Source backends tensor data can be exported to destination backend tensor without copy...
Destination backend can work directly with tensors on source backend.
void ForEachLayer(Func func) const
Definition: Graph.hpp:39
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
static const FactoryId LegacyFactoryId

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handlers of each layer.

Definition at line 179 of file Graph.cpp.

References ITensorHandle::Allocate(), ARMNN_ASSERT, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), and armnn::Success.

Referenced by BOOST_AUTO_TEST_CASE(), and Graph::GetNumLayers().

180 {
181  // Layers must be sorted in topological order
182  ARMNN_ASSERT(m_LayersInOrder);
183 
184  std::unordered_set<const ITensorHandle*> preallocatedTensors;
185  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
186 
187  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
188  // is a TensorHandle, the function just returns it
189  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
190  {
191  ITensorHandle* ancestor = subTensorHandle;
192  while (ancestor && ancestor->GetParent())
193  {
194  ancestor = ancestor->GetParent();
195  }
196  return ancestor;
197  };
198 
199  // Checks whether a TensorHandle has been pre-allocated
200  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
201  {
202  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
203  };
204 
205  // Constant tensor handles need to last from the beginning of execution till the end,
206  // therefore we pre-allocate them upfront
207  for (auto&& layer : m_Layers)
208  {
209  if (layer->GetType() == LayerType::Constant)
210  {
211  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
212  {
213  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
214 
215  if (tensorHandle && !IsPreallocated(tensorHandle))
216  {
217  tensorHandle->Allocate();
218  preallocatedTensors.insert(tensorHandle);
219  }
220  }
221  }
222  }
223 
224  // Iterate over the network in topological order
225  for (auto&& layer : m_Layers)
226  {
227  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
228  // The first time we encounter a new tensor handle, we start managing its lifetime.
229  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
230  {
231  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
232 
233  if (tensorHandle && !IsPreallocated(tensorHandle))
234  {
235  unsigned int numConnections = slot->GetNumConnections();
236  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
237  {
238  handleReferenceCounts[tensorHandle] = numConnections;
239  tensorHandle->Manage();
240  if (handleReferenceCounts[tensorHandle] == 0u)
241  {
242  // if nobody consumes this tensor we call Allocate()
243  tensorHandle->Allocate();
244  }
245  }
246  else
247  {
248  handleReferenceCounts[tensorHandle] += numConnections;
249  }
250  }
251  }
252 
253  // Loop through the input slots in the same layer and decrement the reference counter associated
254  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
255  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
256  {
257  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
258  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
259 
260  if (tensorHandle && !IsPreallocated(tensorHandle))
261  {
262  --handleReferenceCounts[tensorHandle];
263 
264  if (handleReferenceCounts[tensorHandle] == 0u)
265  {
266  // Stop managing lifetime of tensor handle
267  tensorHandle->Allocate();
268  handleReferenceCounts.erase(tensorHandle);
269  }
270  }
271  }
272  }
273 
274  return Status::Success;
275 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 208 of file Graph.hpp.

Referenced by GraphObservable< Layer *>::GraphObservable().

208  {
209  m_Views[notifyOnEvent].emplace_back(observable);
210  }

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 162 of file Graph.hpp.

Referenced by BOOST_AUTO_TEST_CASE(), armnn::Optimize(), and Optimizer::Pass().

162 { return m_Layers.begin(); }

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 167 of file Graph.hpp.

167 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 172 of file Graph.hpp.

References Graph::InputLayersAccessor::begin().

Referenced by AddBroadcastReshapeLayerOptimizerTest(), and BOOST_AUTO_TEST_CASE().

172 { return begin(); }
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:162

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 174 of file Graph.hpp.

References Graph::InputLayersAccessor::end().

Referenced by AddBroadcastReshapeLayerOptimizerTest(), and BOOST_AUTO_TEST_CASE().

174 { return end(); }
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:164

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 212 of file Graph.hpp.

References ARMNN_ASSERT, Graph::GetPosInGraph(), armnn::IgnoreUnused(), armnn::Input, Graph::InputLayersAccessor::m_Graph, and armnn::Output.

Referenced by GraphObservable< Layer *>::~GraphObservable().

212  {
213  m_Views[notifyOnEvent].remove(observable);
214  }

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 164 of file Graph.hpp.

Referenced by BOOST_AUTO_TEST_CASE(), armnn::Optimize(), and Optimizer::Pass().

164 { return m_Layers.end(); }

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 169 of file Graph.hpp.

169 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 457 of file Graph.hpp.

References ARMNN_ASSERT, Graph::EraseLayer(), and Graph::GetPosInGraph().

458 {
459  ARMNN_ASSERT(layer != nullptr);
460  EraseLayer(GetPosInGraph(*layer));
461  layer = nullptr;
462 }
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:449
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:394
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 39 of file Graph.hpp.

Referenced by Graph::AddCompatibilityLayers(), BOOST_AUTO_TEST_CASE(), Graph::operator=(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().

40  {
41  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
42  {
43  auto next = std::next(it);
44  func(*it);
45  it = next;
46  }
47  }

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 185 of file Graph.hpp.

References Graph::InputLayersAccessor::InputLayersAccessor().

Referenced by armnn::BOOST_AUTO_TEST_CASE(), LoadedNetwork::EnqueueWorkload(), armnn::GetInputTensorInfo(), and NetworkQuantizer::OverrideInputRange().

185 { return InputLayersAccessor(*this); }

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 180 of file Graph.hpp.

Referenced by Graph::InputLayersAccessor::end(), and LoadedNetwork::EnqueueWorkload().

180 { return m_InputIds.size(); }

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 181 of file Graph.hpp.

Referenced by Graph::OutputLayersAccessor::begin(), and LoadedNetwork::EnqueueWorkload().

181 { return m_OutputIds.size(); }

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 189 of file Graph.hpp.

Referenced by LoadedNetwork::EnqueueWorkload().

189 { return OutputLayersAccessor(*this); }

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer layer)
inline

Gets the position of a layer in the graph.

Definition at line 394 of file Graph.hpp.

References ARMNN_ASSERT.

Referenced by Graph::DetachObservable(), Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

395 {
396  auto it = m_PosInGraphMap.find(&layer);
397  ARMNN_ASSERT(it != m_PosInGraphMap.end());
398  return it->second;
399 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 529 of file Graph.cpp.

References armnn::GetLayerTypeAsCString(), IOutputSlot::IsTensorInfoSet(), Graph::TopologicalSort(), and armnn::ValidateOnly.

Referenced by BOOST_AUTO_TEST_CASE(), Graph::GetNumLayers(), armnn::Optimize(), PreluValidateTensorShapesFromInputsMatchTest(), PreluValidateTensorShapesFromInputsNoMatchTest(), StackValidateTensorShapesFromInputsMatchTest(), and StackValidateTensorShapesFromInputsNoMatchTest().

530 {
531  for (auto&& layer : TopologicalSort())
532  {
533  for (auto&& input : layer->GetInputSlots())
534  {
535  const IOutputSlot* source = input.GetConnectedOutputSlot();
536  if (source == NULL)
537  {
538  std::ostringstream message;
539  message << "Input not connected on "
540  << GetLayerTypeAsCString(layer->GetType())
541  << " layer \""
542  << layer->GetName()
543  << "\"";
544  throw LayerValidationException(message.str());
545  }
546 
547  if (!source->IsTensorInfoSet())
548  {
549  throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
550  }
551 
552  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
553  {
554  layer->ValidateTensorShapesFromInputs();
555  }
556  }
557  }
558 }
Validate all output shapes.
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:177
const char * GetLayerTypeAsCString(LayerType type)

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 416 of file Graph.hpp.

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by BOOST_AUTO_TEST_CASE(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), FoldPadIntoConvolution2dImpl::Run(), MoveTransposeUpImpl::Run(), MovePermuteUpImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), AddBroadcastReshapeLayerImpl::Run(), and Graph::~Graph().

417 {
418  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
419  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
420  const Iterator pos = (parentOut != nullptr)
421  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
422  : GetPosInGraph(insertBefore.GetOwningLayer());
423  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
424  insertBefore.Insert(*layer);
425 
426  NotifyObservables(GraphEvent::LayerAdded, layer);
427 
428  return layer;
429 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:50
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:394

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 432 of file Graph.hpp.

References ARMNN_ASSERT, OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

433 {
434  Layer& owningLayer = insertAfter.GetOwningLayer();
435 
436  const Iterator pos = std::next(GetPosInGraph(owningLayer));
437  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
438 
439  ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
440 
441  insertAfter.MoveAllConnections(layer->GetOutputSlot());
442  insertAfter.Connect(layer->GetInputSlot(0));
443 
444  NotifyObservables(GraphEvent::LayerAdded, layer);
445 
446  return layer;
447 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:50
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:394
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ operator=() [1/2]

Graph& operator= ( const Graph &  other)
delete

Referenced by Graph::Graph().

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 110 of file Graph.hpp.

References ARMNN_ASSERT, Graph::ForEachLayer(), and Layer::Reparent().

111  {
112  m_InputIds = std::move(other.m_InputIds);
113  m_OutputIds = std::move(other.m_OutputIds);
114  m_LayersInOrder = std::move(other.m_LayersInOrder);
115  m_Views = std::move(other.m_Views);
116 
117  other.ForEachLayer([this](Layer* otherLayer)
118  {
119  otherLayer->Reparent(*this, m_Layers.end());
120  });
121 
122  ARMNN_ASSERT(other.m_PosInGraphMap.empty());
123  ARMNN_ASSERT(other.m_Layers.empty());
124 
125  return *this;
126  }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ Print()

Status Print ( ) const

Definition at line 61 of file Graph.cpp.

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder(), and Graph::~Graph().

62 {
63  if (m_Layers.empty())
64  {
65  ARMNN_LOG(info) << "\n Graph is empty.\n";
66  return Status::Success;
67  }
68  ARMNN_LOG(info) << "\n";
69  ARMNN_LOG(info) << "Walking Pattern: \n";
70 
71  for (auto&& it : TopologicalSort())
72  {
73  auto numInputSlots = it->GetNumInputSlots();
74  auto numOutputSlots = it->GetNumOutputSlots();
75 
76  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
77  << ":" << it->GetBackendId().Get()
78  << " has " << numInputSlots << " input slots"
79  << " and " << numOutputSlots << " output slots.";
80 
81  for (auto i : it->GetInputSlots())
82  {
83  std::ostringstream message;
84  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
85  unsigned int numDims = inputTensorShape.GetNumDimensions();
86 
87  message << "The input slot has shape [ ";
88  for (unsigned int dim=0; dim < numDims; dim++)
89  {
90  message << inputTensorShape[dim] << ",";
91  }
92  message << " ]";
93  ARMNN_LOG(info) << message.str();
94  }
95 
96  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
97  {
98  const armnn::Layer *layer = it;
99  std::ostringstream message;
100  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
101  unsigned int numDims = outputTensorShape.GetNumDimensions();
102 
103  message << "The output slot has shape [ ";
104  for (unsigned int dim=0; dim < numDims; dim++)
105  {
106  message << outputTensorShape[dim] << ",";
107  }
108  message << " ]";
109  ARMNN_LOG(info) << message.str();
110  }
111  ARMNN_LOG(info) << "\n";
112  }
113  ARMNN_LOG(info) << "\n\n";
114 
115  return Status::Success;
116 }
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:238
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:177
const char * GetLayerTypeAsCString(LayerType type)

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 33 of file Graph.hpp.

34  {
35  return PolymorphicDowncast<LayerType*>(layer);
36  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 118 of file Graph.cpp.

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.

Referenced by BOOST_AUTO_TEST_CASE(), and Graph::~Graph().

119 {
120  {
121  DotGraph graph(stream, "Optimized");
122 
123  {
124  // Default node attributes:
125  DotDefaults nodes(stream, "node");
126  nodes.GetAttributeSet()
127  .AddAttribute("shape", "record");
128  }
129 
130  {
131  // Default edge attributes:
132  DotDefaults edges(stream, "edge");
133  edges.GetAttributeSet()
134  .AddAttribute("fontsize", 8)
135  .AddAttribute("fontcolor", "blue")
136  .AddAttribute("fontname", "arial-bold");
137  }
138 
139  // First declares the nodes.
140  for (auto&& layer : m_Layers)
141  {
142  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
143  // Extracts the layer parameters.
144  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
145  node.GetContents().AddContent(name + " : " + value);
146  };
147  layer->SerializeLayerParameters(extractParams);
148  }
149 
150  // Second declares the edges.
151  for (auto&& layer : m_Layers)
152  {
153  LayerGuid toId = layer->GetGuid();
154 
155  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
156  {
157  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
158  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
159  DotEdge edge(stream, fromId, toId);
160 
161  // Now print the tensor shape on the edge.
162  {
163  // Constructs the label attribute with HTML markup.
164  std::stringstream ss;
165  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
166  edge.GetAttributeSet().AddAttribute("label", ss);
167  }
168  }
169  }
170  }
171 
172  if (stream.bad())
173  {
174  return Status::Failure;
175  }
176  return Status::Success;
177 }
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:275
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const char * GetLayerTypeAsCString(LayerType type)

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
IConnectableLayer * substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 432 of file Graph.cpp.

References ARMNN_ASSERT.

Referenced by armnn::ApplyBackendOptimizations(), BOOST_AUTO_TEST_CASE(), and Graph::GetNumLayers().

// Listing of Graph::SubstituteSubgraph (layer overload): replaces the given
// sub-graph with a single substitute layer. Rejects a null substitute via
// ARMNN_ASSERT, then rewires the sub-graph's boundary connections onto the
// substitute before erasing the sub-graph's layers from this graph.
433 {
434  ARMNN_ASSERT(substituteLayer != nullptr);
435 
// Order matters: connections must be moved to the substitute before the old
// layers are erased, otherwise the boundary slots would dangle.
436  ReplaceSubgraphConnections(subgraph, substituteLayer);
437  EraseSubgraphLayers(subgraph);
438 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
const SubgraphView & substituteSubgraph 
)

Definition at line 440 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, SubgraphView::Clear(), IOutputSlot::Connect(), IOutputSlot::Disconnect(), Graph::EraseLayer(), SubgraphView::ForEachLayer(), InputSlot::GetConnection(), SubgraphView::GetInputSlots(), SubgraphView::GetLayers(), SubgraphView::GetOutputSlots(), armnn::IgnoreUnused(), OutputSlot::MoveAllConnections(), armnn::numeric_cast(), Layer::Reparent(), and Graph::TopologicalSort().

// Listing of Graph::SubstituteSubgraph (sub-graph overload): replaces the
// given sub-graph with another sub-graph. Layers of the substitute that are
// not yet members of this graph are reparented into it first; reparenting
// invalidates the cached topological order (m_LayersInOrder), which is
// restored by the TopologicalSort() call at the end.
441 {
442  // Look through each layer in the new subgraph and add any that are not already a member of this graph
443  substituteSubgraph.ForEachLayer([this](Layer* layer)
444  {
445  if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
446  {
447  layer->Reparent(*this, m_Layers.end());
448  m_LayersInOrder = false;
449  }
450  });
451 
// Rewire boundary connections onto the substitute sub-graph, then erase the
// replaced layers; only afterwards is the graph re-sorted.
452  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
453  EraseSubgraphLayers(subgraph);
454  TopologicalSort();
455 }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:177

◆ TopologicalSort() [1/2]

Graph& TopologicalSort ( )
inline

Sorts layers in topological order and return this.

Definition at line 177 of file Graph.hpp.

References Graph::TopologicalSort().

Referenced by BOOST_AUTO_TEST_CASE(), CheckOrder(), NetworkQuantizer::ExportNetwork(), Graph::InferTensorInfos(), LoadedNetwork::MakeLoadedNetwork(), Optimizer::Pass(), Graph::Print(), NetworkQuantizer::Refine(), LoadedNetwork::SendNetworkStructure(), Graph::SubstituteSubgraph(), and Graph::TopologicalSort().

// Non-const overload: forwards to the const TopologicalSort() implementation
// (which lazily re-sorts m_Layers), then returns *this to allow chaining.
177 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }
Graph(bool shapeInferenceMethod=false)
Definition: Graph.hpp:95
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:177

◆ TopologicalSort() [2/2]

const Graph & TopologicalSort ( ) const

Definition at line 277 of file Graph.cpp.

// Listing of const Graph::TopologicalSort(): lazily sorts m_Layers into
// topological order. The sort only runs when the cached m_LayersInOrder flag
// is false; it first resets every layer's priority, then orders the list by
// ascending priority and marks the cached order valid again.
278 {
279  if (!m_LayersInOrder)
280  {
281  // Resets layer order.
282  for (auto&& it : m_Layers)
283  {
284  it->ResetPriority();
285  }
286 
// Lower priority value sorts earlier; std::list::sort is stable, so layers
// with equal priority keep their relative order.
287  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
288  {
289  return layerA->GetPriority() < layerB->GetPriority();
290  };
291 
292  m_Layers.sort(compareLayerPriority);
293 
294  m_LayersInOrder = true;
295  }
296 
297  return *this;
298 }

The documentation for this class was generated from the following files: