ArmNN
 22.11
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< ConstantLayer >
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = TransformIterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = TransformIterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = TransformIterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph (bool shapeInferenceMethod=false, bool allowExpandedDims=false)
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print () const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and return this. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via an intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void VerifyConstantLayerSetTensorInfo () const
 For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots. More...
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 
void SetLayersOutOfOrder ()
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Friends

class SubgraphView
 

Detailed Description

Definition at line 30 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 56 of file Graph.hpp.

◆ ConstIteratorInputs

Definition at line 57 of file Graph.hpp.

◆ ConstIteratorOutputs

Definition at line 58 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 53 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 54 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool  shapeInferenceMethod = false,
bool  allowExpandedDims = false 
)
inline

Definition at line 98 of file Graph.hpp.

References Graph::operator=().

99  : m_LayersInOrder(true)
100  , m_AllowExpandedDims(allowExpandedDims)
101  , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
102  ShapeInferenceMethod::ValidateOnly)
103  , m_Profiler(std::make_shared<IProfiler>())
104  {}
Validate all output shapes.
Infer missing output shapes and validate all output shapes.

◆ Graph() [2/3]

Graph ( const Graph &  other)

Definition at line 27 of file Graph.cpp.

References Layer::BeginOutputSlots(), Layer::Clone(), and Layer::GetInputSlot().

28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55  outputSlot->Connect(inputSlot);
56  }
57  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
58  ++outputSlot;
59  }
60  }
61 }

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 110 of file Graph.hpp.

111  {
112  *this = std::move(other);
113  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 135 of file Graph.hpp.

References Graph::AddLayer(), Graph::EraseLayer(), Graph::ForEachLayer(), Graph::InsertNewLayer(), Graph::Print(), and Graph::SerializeToDot().

136  {
137  ForEachLayer([](Layer* layer)
138  {
139  delete layer;
140  });
141  }
void ForEachLayer(Func func) const
Definition: Graph.hpp:40

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry &  registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via an intermediary copy layers.

Definition at line 303 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by Graph::GetNumLayers(), and armnn::Optimize().

305 {
306  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
307  // connections to other layers).
308  auto MayNeedCompatibilityLayer = [](const Layer& layer)
309  {
310  // All layers should have been associated with a valid compute device at this point.
311  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
312  // Does not need another compatibility layer if a copy or import layer is already present.
313  return layer.GetType() != LayerType::MemCopy &&
314  layer.GetType() != LayerType::MemImport;
315  };
316 
317  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
318  {
319  return strategy == EdgeStrategy::CopyToTarget ||
320  strategy == EdgeStrategy::ExportToTarget;
321  };
322 
323  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
324  {
325  ARMNN_ASSERT(srcLayer);
326 
327  if (!MayNeedCompatibilityLayer(*srcLayer))
328  {
329  // The current layer does not need copy layers, move to the next one
330  return;
331  }
332 
333  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
334  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
335  {
336  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
337  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
338  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
339  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
340  {
341  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
342  ARMNN_ASSERT(dstInputSlot);
343 
344  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
345  ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
346  "Undefined memory strategy found while adding copy layers for compatibility");
347 
348  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
349  if (MayNeedCompatibilityLayer(dstLayer) &&
350  IsCompatibilityStrategy(strategy))
351  {
352  // A copy layer is needed in between the source and destination layers.
353  // Record the operation rather than attempting to modify the graph as we go.
354  // (invalidating iterators)
355  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
356  srcLayer->GetName(),
357  srcOutputIndex,
358  dstLayer.GetName(),
359  dstInputSlot->GetSlotIndex());
360  Layer* compLayer = nullptr;
361  if (strategy == EdgeStrategy::CopyToTarget)
362  {
363  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
364  }
365  else
366  {
367  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
368  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
369  }
370 
371  compLayer->SetBackendId(dstLayer.GetBackendId());
372 
373  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
374  auto backendIt = backends.find(dstLayer.GetBackendId());
375  if (backendIt != backends.end() &&
376  backendIt->second &&
377  backendIt->second->SupportsTensorAllocatorAPI())
378  {
379  auto backend = backendIt->second.get();
380  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
381  bool found = false;
382 
383  for (auto preference : tensorHandleFactoryIds)
384  {
385  auto factory = registry.GetFactory(preference);
386  if (factory)
387  {
388  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
389  auto srcFactory = registry.GetFactory(srcPref);
390 
391  if (srcFactory)
392  {
393  bool canExportImport =
394  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
395 
396  if (factory->SupportsMapUnmap() || canExportImport)
397  {
398  compOutputSlot.SetTensorHandleFactory(preference);
399  found = true;
400  break;
401  }
402  }
403  }
404  }
405 
406  if (!found)
407  {
408  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
409  }
410  }
411  else
412  {
413  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
414  }
415 
416  // The output strategy of a compatibility layer is always DirectCompatibility.
417  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
418 
419  // Recalculate the connection index on the previous layer as we have just inserted into it.
420  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
421  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
422  std::find(newSourceConnections.begin(),
423  newSourceConnections.end(),
424  &compLayer->GetInputSlot(0)));
425 
426  // The input strategy of a compatibility layer is always DirectCompatibilty.
427  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
428  EdgeStrategy::DirectCompatibility);
429  }
430  }
431  }
432  });
433 }
No strategy has been defined. Used internally to verify integrity of optimizations.
Source backends tensor data can be exported to destination backend tensor without copy...
Destination backend can work directly with tensors on source backend.
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
static const FactoryId LegacyFactoryId

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline

Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.

Definition at line 456 of file Graph.hpp.

References armnn::Input, armnn::LayerAdded, and armnn::Output.

Referenced by Layer::CloneBase(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), and Graph::~Graph().

457 {
458  m_LayersInOrder = m_LayersInOrder &&
459  ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
460  LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
461 
462  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
463  layer->SetAllowExpandedDims(m_AllowExpandedDims);
464 
465  NotifyObservables(GraphEvent::LayerAdded, layer);
466 
467  return layer;
468 }

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handers of each layer.

Definition at line 181 of file Graph.cpp.

References ITensorHandle::Allocate(), ARMNN_ASSERT, ARMNN_SCOPED_PROFILING_EVENT, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), armnn::Success, and armnn::Undefined.

Referenced by Graph::GetNumLayers().

182 {
183  // Layers must be sorted in topological order
184  ARMNN_ASSERT(m_LayersInOrder);
185  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
186 
187  std::unordered_set<const ITensorHandle*> preallocatedTensors;
188  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
189 
190  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
191  // is a TensorHandle, the function just returns it
192  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
193  {
194  ITensorHandle* ancestor = subTensorHandle;
195  while (ancestor && ancestor->GetParent())
196  {
197  ancestor = ancestor->GetParent();
198  }
199  return ancestor;
200  };
201 
202  // Checks whether a TensorHandle has been pre-allocated
203  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
204  {
205  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
206  };
207 
208  // Constant tensor handles need to last from the beginning of execution till the end,
209  // therefore we pre-allocate them upfront
210  for (auto&& layer : m_Layers)
211  {
212  if (layer->GetType() == LayerType::Constant)
213  {
214  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
215  {
216  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
217 
218  if (tensorHandle && !IsPreallocated(tensorHandle))
219  {
220  tensorHandle->Allocate();
221  preallocatedTensors.insert(tensorHandle);
222  }
223  }
224  }
225  }
226 
227  // Iterate over the network in topological order
228  for (auto&& layer : m_Layers)
229  {
230  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
231  // The first time we encounter a new tensor handle, we start managing its lifetime.
232  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
233  {
234  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
235 
236  if (tensorHandle && !IsPreallocated(tensorHandle))
237  {
238  unsigned int numConnections = slot->GetNumConnections();
239  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
240  {
241  handleReferenceCounts[tensorHandle] = numConnections;
242  tensorHandle->Manage();
243  if (handleReferenceCounts[tensorHandle] == 0u)
244  {
245  // if nobody consumes this tensor we call Allocate()
246  tensorHandle->Allocate();
247  }
248  }
249  else
250  {
251  handleReferenceCounts[tensorHandle] += numConnections;
252  }
253  }
254  }
255 
256  // Loop through the input slots in the same layer and decrement the reference counter associated
257  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
258  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
259  {
260  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
261  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
262 
263  if (tensorHandle && !IsPreallocated(tensorHandle))
264  {
265  --handleReferenceCounts[tensorHandle];
266 
267  if (handleReferenceCounts[tensorHandle] == 0u)
268  {
269  // Stop managing lifetime of tensor handle
270  tensorHandle->Allocate();
271  handleReferenceCounts.erase(tensorHandle);
272  }
273  }
274  }
275  }
276 
277  return Status::Success;
278 }
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 219 of file Graph.hpp.

Referenced by GraphObservable< Layer *>::GraphObservable().

219  {
220  m_Views[notifyOnEvent].emplace_back(observable);
221  }

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 169 of file Graph.hpp.

Referenced by armnn::Optimize(), and Optimizer::Pass().

169 { return m_Layers.begin(); }

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 174 of file Graph.hpp.

174 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 179 of file Graph.hpp.

References Graph::InputLayersAccessor::begin().

179 { return begin(); }
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:169

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 181 of file Graph.hpp.

References Graph::InputLayersAccessor::end().

181 { return end(); }
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:171

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 223 of file Graph.hpp.

References armnn::Constant, Graph::GetPosInGraph(), Graph::GetProfiler(), armnn::Input, armnn::Output, and Graph::SetLayersOutOfOrder().

Referenced by GraphObservable< Layer *>::~GraphObservable().

223  {
224  m_Views[notifyOnEvent].remove(observable);
225  }

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 171 of file Graph.hpp.

Referenced by armnn::Optimize(), and Optimizer::Pass().

171 { return m_Layers.end(); }

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 176 of file Graph.hpp.

176 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 512 of file Graph.hpp.

References ARMNN_ASSERT, Graph::EraseLayer(), and Graph::GetPosInGraph().

513 {
514  ARMNN_ASSERT(layer != nullptr);
515  EraseLayer(GetPosInGraph(*layer));
516  layer = nullptr;
517 }
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:504
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:448
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 40 of file Graph.hpp.

Referenced by Graph::AddCompatibilityLayers(), Graph::operator=(), armnn::SelectTensorHandleStrategy(), and Graph::~Graph().

41  {
42  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43  {
44  auto next = std::next(it);
45  func(*it);
46  it = next;
47  }
48  }

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 192 of file Graph.hpp.

References Graph::InputLayersAccessor::InputLayersAccessor().

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportInputs().

192 { return InputLayersAccessor(*this); }

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 187 of file Graph.hpp.

Referenced by Graph::InputLayersAccessor::end(), LoadedNetwork::EnqueueWorkload(), LoadedNetwork::Execute(), and LoadedNetwork::MakeLoadedNetwork().

187 { return m_InputIds.size(); }

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 188 of file Graph.hpp.

Referenced by Graph::OutputLayersAccessor::begin(), LoadedNetwork::EnqueueWorkload(), LoadedNetwork::Execute(), and LoadedNetwork::MakeLoadedNetwork().

188 { return m_OutputIds.size(); }

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 196 of file Graph.hpp.

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportOutputs().

196 { return OutputLayersAccessor(*this); }

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer &  layer)
inline

Gets the position of a layer in the graph.

Definition at line 448 of file Graph.hpp.

References ARMNN_ASSERT.

Referenced by Graph::DetachObservable(), Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

449 {
450  auto it = m_PosInGraphMap.find(&layer);
451  ARMNN_ASSERT(it != m_PosInGraphMap.end());
452  return it->second;
453 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ GetProfiler()

const std::shared_ptr< IProfiler > & GetProfiler ( ) const

Definition at line 650 of file Graph.cpp.

Referenced by Graph::DetachObservable(), and armnn::Optimize().

651 {
652  return m_Profiler;
653 }

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 562 of file Graph.cpp.

References armnn::Convolution2d, armnn::Convolution3d, armnn::DepthwiseConvolution2d, armnn::FullyConnected, InputSlot::GetConnectedOutputSlot(), Layer::GetInputSlot(), armnn::GetLayerTypeAsCString(), Layer::GetName(), Layer::GetNumInputSlots(), Layer::GetType(), IOutputSlot::IsTensorInfoSet(), Graph::TopologicalSort(), and armnn::ValidateOnly.

Referenced by Graph::GetNumLayers(), and armnn::Optimize().

563 {
564  for (auto&& layer : TopologicalSort())
565  {
566  for (auto&& input : layer->GetInputSlots())
567  {
568  const IOutputSlot* source = input.GetConnectedOutputSlot();
569  if (source == NULL)
570  {
571  // Throws exception due to a layer input not being connected to an output slot.
572  // Verifies input slot weights and bias are set for FullyConnected layers.
573  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
574  }
575 
576  if (!source->IsTensorInfoSet())
577  {
578  std::ostringstream message;
579  message << "Output slot TensorInfo not set on "
580  << GetLayerTypeAsCString(layer->GetType())
581  << " layer "
582  << std::quoted(layer->GetName());
583  throw LayerValidationException(message.str());
584  }
585  }
586 
587  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
588  {
589  layer->ValidateTensorShapesFromInputs();
590  }
591  }
592 }
Validate all output shapes.
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
const char * GetLayerTypeAsCString(LayerType type)

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot &  insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 471 of file Graph.hpp.

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), MoveTransposeUpImpl::Run(), PermuteDepthwiseConv2dWeightsImpl::Run(), MovePermuteUpImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), AddBroadcastReshapeLayerImpl::Run(), and Graph::~Graph().

472 {
473  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
474  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
475  const Iterator pos = (parentOut != nullptr)
476  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
477  : GetPosInGraph(insertBefore.GetOwningLayer());
478  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
479  insertBefore.Insert(*layer);
480 
481  NotifyObservables(GraphEvent::LayerAdded, layer);
482 
483  return layer;
484 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:53
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:448

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot &  insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 487 of file Graph.hpp.

References ARMNN_ASSERT, OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

488 {
489  Layer& owningLayer = insertAfter.GetOwningLayer();
490 
491  const Iterator pos = std::next(GetPosInGraph(owningLayer));
492  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
493 
494  ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
495 
496  insertAfter.MoveAllConnections(layer->GetOutputSlot());
497  insertAfter.Connect(layer->GetInputSlot(0));
498 
499  NotifyObservables(GraphEvent::LayerAdded, layer);
500 
501  return layer;
502 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:53
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:448
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ operator=() [1/2]

Graph& operator= ( const Graph &  other)
delete

Referenced by Graph::Graph().

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 115 of file Graph.hpp.

References ARMNN_ASSERT, Graph::ForEachLayer(), and Layer::Reparent().

116  {
117  m_InputIds = std::move(other.m_InputIds);
118  m_OutputIds = std::move(other.m_OutputIds);
119  m_LayersInOrder = std::move(other.m_LayersInOrder);
120  m_Views = std::move(other.m_Views);
121  m_Profiler = std::move(other.m_Profiler);
122  m_AllowExpandedDims = other.m_AllowExpandedDims;
123  m_ShapeInferenceMethod = other.m_ShapeInferenceMethod;
124  other.ForEachLayer([this](Layer* otherLayer)
125  {
126  otherLayer->Reparent(*this, m_Layers.end());
127  });
128 
129  ARMNN_ASSERT(other.m_PosInGraphMap.empty());
130  ARMNN_ASSERT(other.m_Layers.empty());
131 
132  return *this;
133  }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ Print()

Status Print ( ) const

Definition at line 63 of file Graph.cpp.

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder(), and Graph::~Graph().

64 {
65  if (m_Layers.empty())
66  {
67  ARMNN_LOG(info) << "\n Graph is empty.\n";
68  return Status::Success;
69  }
70  ARMNN_LOG(info) << "\n";
71  ARMNN_LOG(info) << "Walking Pattern: \n";
72 
73  for (auto&& it : TopologicalSort())
74  {
75  auto numInputSlots = it->GetNumInputSlots();
76  auto numOutputSlots = it->GetNumOutputSlots();
77 
78  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
79  << ":" << it->GetBackendId().Get()
80  << " has " << numInputSlots << " input slots"
81  << " and " << numOutputSlots << " output slots.";
82 
83  for (auto i : it->GetInputSlots())
84  {
85  std::ostringstream message;
86  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
87  unsigned int numDims = inputTensorShape.GetNumDimensions();
88 
89  message << "The input slot has shape [ ";
90  for (unsigned int dim=0; dim < numDims; dim++)
91  {
92  message << inputTensorShape[dim] << ",";
93  }
94  message << " ]";
95  ARMNN_LOG(info) << message.str();
96  }
97 
98  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
99  {
100  const armnn::Layer *layer = it;
101  std::ostringstream message;
102  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
103  unsigned int numDims = outputTensorShape.GetNumDimensions();
104 
105  message << "The output slot has shape [ ";
106  for (unsigned int dim=0; dim < numDims; dim++)
107  {
108  message << outputTensorShape[dim] << ",";
109  }
110  message << " ]";
111  ARMNN_LOG(info) << message.str();
112  }
113  ARMNN_LOG(info) << "\n";
114  }
115  ARMNN_LOG(info) << "\n\n";
116 
117  return Status::Success;
118 }
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:246
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
const char * GetLayerTypeAsCString(LayerType type)

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 34 of file Graph.hpp.

35  {
36  return PolymorphicDowncast<LayerType*>(layer);
37  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 120 of file Graph.cpp.

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.

Referenced by Graph::~Graph().

121 {
122  {
123  DotGraph graph(stream, "Optimized");
124 
125  {
126  // Default node attributes:
127  DotDefaults nodes(stream, "node");
128  nodes.GetAttributeSet()
129  .AddAttribute("shape", "record");
130  }
131 
132  {
133  // Default edge attributes:
134  DotDefaults edges(stream, "edge");
135  edges.GetAttributeSet()
136  .AddAttribute("fontsize", 8)
137  .AddAttribute("fontcolor", "blue")
138  .AddAttribute("fontname", "arial-bold");
139  }
140 
141  // First declares the nodes.
142  for (auto&& layer : m_Layers)
143  {
144  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
145  // Extracts the layer parameters.
146  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
147  node.GetContents().AddContent(name + " : " + value);
148  };
149  layer->SerializeLayerParameters(extractParams);
150  }
151 
152  // Second declares the edges.
153  for (auto&& layer : m_Layers)
154  {
155  LayerGuid toId = layer->GetGuid();
156 
157  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
158  {
159  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
160  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
161  DotEdge edge(stream, fromId, toId);
162 
163  // Now print the tensor shape on the edge.
164  {
165  // Constructs the label attribute with HTML markup.
166  std::stringstream ss;
167  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
168  edge.GetAttributeSet().AddAttribute("label", ss);
169  }
170  }
171  }
172  }
173 
174  if (stream.bad())
175  {
176  return Status::Failure;
177  }
178  return Status::Success;
179 }
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const char * GetLayerTypeAsCString(LayerType type)

◆ SetLayersOutOfOrder()

void SetLayersOutOfOrder ( )

Definition at line 655 of file Graph.cpp.

Referenced by Graph::DetachObservable(), and LoadedNetwork::MakeLoadedNetwork().

656 {
657  m_LayersInOrder = false;
658 }

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
IConnectableLayer * substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 435 of file Graph.cpp.

References ARMNN_ASSERT.

Referenced by armnn::ApplyBackendOptimizations(), and Graph::GetNumLayers().

436 {
437  ARMNN_ASSERT(substituteLayer != nullptr);
438 
439  // Create a new sub-graph with only the given layer, using
440  // the given sub-graph as a reference of which parent graph to use
441  SubgraphView substituteSubgraph(substituteLayer);
442 
443  SubstituteSubgraph(subgraph, substituteSubgraph);
444 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:435
friend class SubgraphView
Definition: Graph.hpp:300

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView & subgraph,
const SubgraphView & substituteSubgraph 
)

Definition at line 446 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, SubgraphView::Clear(), IOutputSlot::Connect(), IOutputSlot::Disconnect(), Graph::EraseLayer(), SubgraphView::ForEachIConnectableLayer(), IInputSlot::GetConnection(), SubgraphView::GetIConnectableLayers(), SubgraphView::GetIInputSlots(), SubgraphView::GetIOutputSlots(), armnn::IgnoreUnused(), armnn::numeric_cast(), and Graph::TopologicalSort().

447 {
448  // Look through each layer in the new subgraph and add any that are not already a member of this graph
449  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
450  {
451  if (std::find(std::begin(m_Layers),
452  std::end(m_Layers),
453  iConnectableLayer) == std::end(m_Layers))
454  {
455  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
456  layer->Reparent(*this, m_Layers.end());
457  m_LayersInOrder = false;
458  }
459  });
460 
461  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
462  EraseSubgraphLayers(subgraph);
463  TopologicalSort();
464 }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184

◆ TopologicalSort() [1/2]

Graph& TopologicalSort ( )
inline

Sorts layers in topological order and return this.

Definition at line 184 of file Graph.hpp.

References Graph::TopologicalSort().

Referenced by CheckOrder(), LoadedNetwork::ImportInputs(), LoadedNetwork::ImportOutputs(), Graph::InferTensorInfos(), LoadedNetwork::MakeLoadedNetwork(), Optimizer::Pass(), Graph::Print(), LoadedNetwork::RegisterDebugCallback(), LoadedNetwork::SendNetworkStructure(), Graph::SubstituteSubgraph(), Graph::TopologicalSort(), and Graph::VerifyConstantLayerSetTensorInfo().

184 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition: Graph.hpp:98

◆ TopologicalSort() [2/2]

const Graph & TopologicalSort ( ) const

Definition at line 280 of file Graph.cpp.

281 {
282  if (!m_LayersInOrder)
283  {
284  // Resets layer order.
285  for (auto&& it : m_Layers)
286  {
287  it->ResetPriority();
288  }
289 
290  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
291  {
292  return layerA->GetPriority() < layerB->GetPriority();
293  };
294 
295  m_Layers.sort(compareLayerPriority);
296 
297  m_LayersInOrder = true;
298  }
299 
300  return *this;
301 }

◆ VerifyConstantLayerSetTensorInfo()

void VerifyConstantLayerSetTensorInfo ( ) const

For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.

LayerValidationException thrown if no TensorInfo is set.

Exceptions
LayerValidationException

Definition at line 539 of file Graph.cpp.

References armnn::Constant, armnn::GetLayerTypeAsCString(), and Graph::TopologicalSort().

Referenced by Graph::GetNumLayers(), and armnn::Optimize().

540 {
541  for (auto&& layer : TopologicalSort())
542  {
543  if (layer->GetType() == armnn::LayerType::Constant)
544  {
545  for (auto&& output: layer->GetOutputSlots())
546  {
547  if (!output.IsTensorInfoSet())
548  {
549  std::ostringstream message;
550  message << "Output slot TensorInfo not set on "
551  << GetLayerTypeAsCString(layer->GetType())
552  << " layer \""
553  << layer->GetName()
554  << "\"";
555  throw LayerValidationException(message.str());
556  }
557  }
558  }
559  }
560 }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
const char * GetLayerTypeAsCString(LayerType type)

Friends And Related Function Documentation

◆ SubgraphView

friend class SubgraphView
friend

Definition at line 300 of file Graph.hpp.


The documentation for this class was generated from the following files: