ArmNN
 21.11
Graph Class Reference

#include <Graph.hpp>

Classes

struct  InputLayersAccessor
 Wrapper class returned by Graph::GetInputLayers() More...
 
class  LayerInGraph< InputLayer >
 Inputs add/remove their binding id to m_InputIds in the graph. More...
 
class  LayerInGraph< OutputLayer >
 Outputs add/remove their binding id to m_OutputIds in the graph. More...
 
struct  OutputLayersAccessor
 Wrapper class returned by Graph::GetOutputLayers() More...
 

Public Types

using LayerList = std::list< Layer * >
 
using Iterator = LayerList::const_iterator
 
using IteratorDifference = Iterator::difference_type
 
using ConstIterator = TransformIterator< decltype(&PtrCast< const Layer >), Iterator >
 
using ConstIteratorInputs = TransformIterator< decltype(&PtrCast< const InputLayer >), Iterator >
 
using ConstIteratorOutputs = TransformIterator< decltype(&PtrCast< const OutputLayer >), Iterator >
 

Public Member Functions

template<typename Func >
void ForEachLayer (Func func) const
 
 Graph (bool shapeInferenceMethod=false)
 
 Graph (const Graph &other)
 
Graph & operator= (const Graph &other)=delete
 
 Graph (Graph &&other)
 
Graph & operator= (Graph &&other)
 
 ~Graph ()
 
Status Print () const
 
Status SerializeToDot (std::ostream &stream)
 
template<typename LayerT , typename... Args>
LayerT * AddLayer (Args &&... args)
 Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (InputSlot &insertBefore, Args &&... args)
 Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself. More...
 
template<typename LayerT , typename... Args>
LayerT * InsertNewLayer (OutputSlot &insertAfter, Args &&... args)
 Inserts a new layer between insertAfter and the input slot(s) currently connected to it. More...
 
void EraseLayer (Iterator pos)
 Deletes the layer at the specified position. More...
 
template<typename LayerT >
void EraseLayer (LayerT *&layer)
 Deletes the layer. More...
 
Iterator begin ()
 Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
Iterator end ()
 Returns iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator begin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator end () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
ConstIterator cbegin () const
 Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops. More...
 
ConstIterator cend () const
 Returns const iterator pointing to the end of the list. Lowercase for range-based for loops. More...
 
Graph & TopologicalSort ()
 Sorts layers in topological order and return this. More...
 
const Graph & TopologicalSort () const
 
size_t GetNumInputs () const
 
size_t GetNumOutputs () const
 
InputLayersAccessor GetInputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop. More...
 
OutputLayersAccessor GetOutputLayers () const
 Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop. More...
 
size_t GetNumLayers () const
 
Status AllocateDynamicBuffers ()
 Allocates memory for all tensors under output tensor handlers of each layer. More...
 
void AddCompatibilityLayers (std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
 Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via an intermediary copy layers. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, IConnectableLayer *substituteLayer)
 Substitutes the given sub-graph with either a new layer or a new sub-graph. More...
 
void SubstituteSubgraph (SubgraphView &subgraph, const SubgraphView &substituteSubgraph)
 
void VerifyConstantLayerSetTensorInfo () const
 For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots. More...
 
void InferTensorInfos ()
 
void AttachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
void DetachObservable (IGraphObservable *const observable, GraphEvent notifyOnEvent)
 
Iterator GetPosInGraph (Layer &layer)
 Gets the position of a layer in the graph. More...
 
const std::shared_ptr< IProfiler > & GetProfiler () const
 

Static Public Member Functions

template<typename LayerType >
static LayerType * PtrCast (Layer *const layer)
 

Detailed Description

Definition at line 30 of file Graph.hpp.

Member Typedef Documentation

◆ ConstIterator

using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>

Definition at line 54 of file Graph.hpp.

◆ ConstIteratorInputs

Definition at line 55 of file Graph.hpp.

◆ ConstIteratorOutputs

Definition at line 56 of file Graph.hpp.

◆ Iterator

using Iterator = LayerList::const_iterator

Definition at line 51 of file Graph.hpp.

◆ IteratorDifference

using IteratorDifference = Iterator::difference_type

Definition at line 52 of file Graph.hpp.

◆ LayerList

using LayerList = std::list<Layer*>

Definition at line 50 of file Graph.hpp.

Constructor & Destructor Documentation

◆ Graph() [1/3]

Graph ( bool  shapeInferenceMethod = false)
inline

Definition at line 96 of file Graph.hpp.

References Graph::operator=().

97  : m_LayersInOrder(true)
98  , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
99  ShapeInferenceMethod::ValidateOnly)
100  , m_Profiler(std::make_shared<IProfiler>())
101  {}
Validate all output shapes.
Infer missing output shapes and validate all output shapes.

◆ Graph() [2/3]

Graph ( const Graph & other)

Definition at line 28 of file Graph.cpp.

References Layer::BeginOutputSlots(), Layer::Clone(), and Layer::GetInputSlot().

29 : m_LayersInOrder(other.m_LayersInOrder)
30 , m_Profiler(other.m_Profiler)
31 {
32  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
33 
34  for (auto&& otherLayer : other.m_Layers)
35  {
36  Layer* const layer = otherLayer->Clone(*this);
37  otherToClonedMap.emplace(otherLayer, layer);
38  }
39 
40  // Copies slot connections.
41  for (auto&& otherLayer : other.m_Layers)
42  {
43  Layer* const thisLayer = otherToClonedMap[otherLayer];
44 
45  auto outputSlot = thisLayer->BeginOutputSlots();
46  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
47  {
48  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
49  {
50  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
51  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
52 
53  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
54  outputSlot->Connect(inputSlot);
55  }
56  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
57  ++outputSlot;
58  }
59  }
60 }

◆ Graph() [3/3]

Graph ( Graph &&  other)
inline

Definition at line 107 of file Graph.hpp.

108  {
109  *this = std::move(other);
110  }

◆ ~Graph()

~Graph ( )
inline

Definition at line 131 of file Graph.hpp.

References Graph::AddLayer(), Graph::EraseLayer(), Graph::ForEachLayer(), Graph::InsertNewLayer(), Graph::Print(), and Graph::SerializeToDot().

132  {
133  ForEachLayer([](Layer* layer)
134  {
135  delete layer;
136  });
137  }
void ForEachLayer(Func func) const
Definition: Graph.hpp:40

Member Function Documentation

◆ AddCompatibilityLayers()

void AddCompatibilityLayers ( std::map< BackendId, std::unique_ptr< class IBackendInternal >> &  backends,
TensorHandleFactoryRegistry & registry 
)

Modifies the graph in-place, removing edges connecting layers using different compute devices, and relinking them via an intermediary copy layers.

Definition at line 302 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, armnn::CopyToTarget, armnn::DirectCompatibility, armnn::ExportToTarget, Graph::ForEachLayer(), Layer::GetBackendId(), OutputSlot::GetConnections(), OutputSlot::GetEdgeStrategies(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetName(), Layer::GetOutputSlot(), Layer::GetOutputSlots(), InputSlot::GetOwningLayer(), InputSlot::GetSlotIndex(), OutputSlot::GetTensorHandleFactoryId(), ITensorHandleFactory::LegacyFactoryId, armnn::MemCopy, armnn::MemImport, OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorHandleFactory(), and armnn::Undefined.

Referenced by Graph::GetNumLayers(), armnn::Optimize(), and TEST_SUITE().

304 {
305  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
306  // connections to other layers).
307  auto MayNeedCompatibilityLayer = [](const Layer& layer)
308  {
309  // All layers should have been associated with a valid compute device at this point.
310  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
311  // Does not need another compatibility layer if a copy or import layer is already present.
312  return layer.GetType() != LayerType::MemCopy &&
313  layer.GetType() != LayerType::MemImport;
314  };
315 
316  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
317  {
318  return strategy == EdgeStrategy::CopyToTarget ||
319  strategy == EdgeStrategy::ExportToTarget;
320  };
321 
322  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
323  {
324  ARMNN_ASSERT(srcLayer);
325 
326  if (!MayNeedCompatibilityLayer(*srcLayer))
327  {
328  // The current layer does not need copy layers, move to the next one
329  return;
330  }
331 
332  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
333  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
334  {
335  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
336  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
337  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
338  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
339  {
340  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
341  ARMNN_ASSERT(dstInputSlot);
342 
 343  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
 344  ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
 345  "Undefined memory strategy found while adding copy layers for compatibility");
346 
347  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
348  if (MayNeedCompatibilityLayer(dstLayer) &&
349  IsCompatibilityStrategy(strategy))
350  {
351  // A copy layer is needed in between the source and destination layers.
352  // Record the operation rather than attempting to modify the graph as we go.
353  // (invalidating iterators)
354  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
355  srcLayer->GetName(),
356  srcOutputIndex,
357  dstLayer.GetName(),
358  dstInputSlot->GetSlotIndex());
359  Layer* compLayer = nullptr;
360  if (strategy == EdgeStrategy::CopyToTarget)
361  {
362  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
363  }
364  else
365  {
366  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
367  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
368  }
369 
370  compLayer->SetBackendId(dstLayer.GetBackendId());
371 
372  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
373  auto backendIt = backends.find(dstLayer.GetBackendId());
374  if (backendIt != backends.end() &&
375  backendIt->second &&
376  backendIt->second->SupportsTensorAllocatorAPI())
377  {
378  auto backend = backendIt->second.get();
379  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
380  bool found = false;
381 
382  for (auto preference : tensorHandleFactoryIds)
383  {
384  auto factory = registry.GetFactory(preference);
385  if (factory)
386  {
387  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
388  auto srcFactory = registry.GetFactory(srcPref);
389 
390  if (srcFactory)
391  {
392  bool canExportImport =
393  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
394 
395  if (factory->SupportsMapUnmap() || canExportImport)
396  {
397  compOutputSlot.SetTensorHandleFactory(preference);
398  found = true;
399  break;
400  }
401  }
402  }
403  }
404 
405  if (!found)
406  {
407  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
408  }
409  }
410  else
411  {
412  compOutputSlot.SetTensorHandleFactory(ITensorHandleFactory::LegacyFactoryId);
413  }
414 
415  // The output strategy of a compatibility layer is always DirectCompatibility.
416  compOutputSlot.SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
417 
418  // Recalculate the connection index on the previous layer as we have just inserted into it.
419  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
420  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
421  std::find(newSourceConnections.begin(),
422  newSourceConnections.end(),
423  &compLayer->GetInputSlot(0)));
424 
425  // The input strategy of a compatibility layer is always DirectCompatibilty.
 426  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
 427  EdgeStrategy::DirectCompatibility);
 428  }
429  }
430  }
431  });
432 }
No strategy has been defined. Used internally to verify integrity of optimizations.
Source backends tensor data can be exported to destination backend tensor without copy...
Destination backend can work directly with tensors on source backend.
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
static const FactoryId LegacyFactoryId

◆ AddLayer()

LayerT * AddLayer ( Args &&...  args)
inline

Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.

Definition at line 417 of file Graph.hpp.

References armnn::Input, armnn::LayerAdded, and armnn::Output.

Referenced by ArgMinMaxInferOutputShapeImpl(), BatchToSpaceInferOutputShapeTest(), armnn::ChainReduceLayers(), Layer::CloneBase(), Convolution2dInferOutputShapeTest(), Convolution3dInferOutputShapeTest(), CreatePreluLayerHelper(), CreateStackLayerHelper(), DepthwiseConvolution2dInferOutputShapeTest(), armnn::FuseLayerWithoutParameters(), armnn::FuseLayerWithParameters(), MockBackend::OptimizeSubgraphView(), PreluInferOutputShapeImpl(), QLstmInferOutputShapeImpl(), QuantizedLstmInferOutputShapeImpl(), SpaceToDepthInferOutputShapeTest(), StackInferOutputShapeImpl(), TEST_SUITE(), TransposeConvolution2dInferOutputShapeTest(), and Graph::~Graph().

418 {
419  m_LayersInOrder = m_LayersInOrder &&
420  ((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
421  LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
422 
423  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
424 
425  NotifyObservables(GraphEvent::LayerAdded, layer);
426 
427  return layer;
428 }

◆ AllocateDynamicBuffers()

Status AllocateDynamicBuffers ( )

Allocates memory for all tensors under output tensor handlers of each layer.

Definition at line 180 of file Graph.cpp.

References ITensorHandle::Allocate(), ARMNN_ASSERT, ARMNN_SCOPED_PROFILING_EVENT, armnn::Constant, ITensorHandle::GetParent(), ITensorHandle::Manage(), armnn::Success, and armnn::Undefined.

Referenced by Graph::GetNumLayers(), and TEST_SUITE().

181 {
182  // Layers must be sorted in topological order
183  ARMNN_ASSERT(m_LayersInOrder);
184  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
185 
186  std::unordered_set<const ITensorHandle*> preallocatedTensors;
187  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
188 
189  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
190  // is a TensorHandle, the function just returns it
191  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
192  {
193  ITensorHandle* ancestor = subTensorHandle;
194  while (ancestor && ancestor->GetParent())
195  {
196  ancestor = ancestor->GetParent();
197  }
198  return ancestor;
199  };
200 
201  // Checks whether a TensorHandle has been pre-allocated
202  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
203  {
204  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
205  };
206 
207  // Constant tensor handles need to last from the beginning of execution till the end,
208  // therefore we pre-allocate them upfront
209  for (auto&& layer : m_Layers)
210  {
211  if (layer->GetType() == LayerType::Constant)
212  {
213  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
214  {
215  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
216 
217  if (tensorHandle && !IsPreallocated(tensorHandle))
218  {
219  tensorHandle->Allocate();
220  preallocatedTensors.insert(tensorHandle);
221  }
222  }
223  }
224  }
225 
226  // Iterate over the network in topological order
227  for (auto&& layer : m_Layers)
228  {
229  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
230  // The first time we encounter a new tensor handle, we start managing its lifetime.
231  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
232  {
233  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
234 
235  if (tensorHandle && !IsPreallocated(tensorHandle))
236  {
237  unsigned int numConnections = slot->GetNumConnections();
238  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
239  {
240  handleReferenceCounts[tensorHandle] = numConnections;
241  tensorHandle->Manage();
242  if (handleReferenceCounts[tensorHandle] == 0u)
243  {
244  // if nobody consumes this tensor we call Allocate()
245  tensorHandle->Allocate();
246  }
247  }
248  else
249  {
250  handleReferenceCounts[tensorHandle] += numConnections;
251  }
252  }
253  }
254 
255  // Loop through the input slots in the same layer and decrement the reference counter associated
256  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
257  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
258  {
259  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
260  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
261 
262  if (tensorHandle && !IsPreallocated(tensorHandle))
263  {
264  --handleReferenceCounts[tensorHandle];
265 
266  if (handleReferenceCounts[tensorHandle] == 0u)
267  {
268  // Stop managing lifetime of tensor handle
269  tensorHandle->Allocate();
270  handleReferenceCounts.erase(tensorHandle);
271  }
272  }
273  }
274  }
275 
276  return Status::Success;
277 }
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ AttachObservable()

void AttachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 215 of file Graph.hpp.

Referenced by GraphObservable< Layer *>::GraphObservable().

215  {
216  m_Views[notifyOnEvent].emplace_back(observable);
217  }

◆ begin() [1/2]

Iterator begin ( )
inline

Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 165 of file Graph.hpp.

Referenced by armnn::Optimize(), Optimizer::Pass(), and TEST_SUITE().

165 { return m_Layers.begin(); }

◆ begin() [2/2]

ConstIterator begin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 170 of file Graph.hpp.

170 { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }

◆ cbegin()

ConstIterator cbegin ( ) const
inline

Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.

Definition at line 175 of file Graph.hpp.

References Graph::InputLayersAccessor::begin().

Referenced by TEST_SUITE().

175 { return begin(); }
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:165

◆ cend()

ConstIterator cend ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 177 of file Graph.hpp.

References Graph::InputLayersAccessor::end().

Referenced by TEST_SUITE().

177 { return end(); }
Iterator end()
Returns iterator pointing to the end of the list. Lowercase for range-based for loops.
Definition: Graph.hpp:167

◆ DetachObservable()

void DetachObservable ( IGraphObservable *const  observable,
GraphEvent  notifyOnEvent 
)
inline

Definition at line 219 of file Graph.hpp.

References ARMNN_ASSERT, Graph::GetPosInGraph(), Graph::GetProfiler(), armnn::IgnoreUnused(), armnn::Input, Graph::InputLayersAccessor::m_Graph, and armnn::Output.

Referenced by GraphObservable< Layer *>::~GraphObservable().

219  {
220  m_Views[notifyOnEvent].remove(observable);
221  }

◆ end() [1/2]

Iterator end ( )
inline

Returns iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 167 of file Graph.hpp.

Referenced by armnn::Optimize(), Optimizer::Pass(), and TEST_SUITE().

167 { return m_Layers.end(); }

◆ end() [2/2]

ConstIterator end ( ) const
inline

Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.

Definition at line 172 of file Graph.hpp.

172 { return {m_Layers.end(), &(PtrCast<const Layer>)}; }

◆ EraseLayer() [1/2]

void EraseLayer ( Iterator  pos)
inline

◆ EraseLayer() [2/2]

void EraseLayer ( LayerT *&  layer)
inline

Deletes the layer.

Sets layer to nullptr on return. Templated to support pointers to any layer type.

Definition at line 472 of file Graph.hpp.

References ARMNN_ASSERT, Graph::EraseLayer(), and Graph::GetPosInGraph().

473 {
474  ARMNN_ASSERT(layer != nullptr);
475  EraseLayer(GetPosInGraph(*layer));
476  layer = nullptr;
477 }
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:464
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:409
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ ForEachLayer()

void ForEachLayer ( Func  func) const
inline

Definition at line 40 of file Graph.hpp.

Referenced by Graph::AddCompatibilityLayers(), Graph::operator=(), armnn::SelectTensorHandleStrategy(), TEST_SUITE(), and Graph::~Graph().

41  {
42  for (auto it = m_Layers.begin(); it != m_Layers.end(); )
43  {
44  auto next = std::next(it);
45  func(*it);
46  it = next;
47  }
48  }

◆ GetInputLayers()

InputLayersAccessor GetInputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the input layers in a range-based for loop.

Definition at line 188 of file Graph.hpp.

References Graph::InputLayersAccessor::InputLayersAccessor().

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportInputs().

188 { return InputLayersAccessor(*this); }

◆ GetNumInputs()

size_t GetNumInputs ( ) const
inline

Definition at line 183 of file Graph.hpp.

Referenced by Graph::InputLayersAccessor::end(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

183 { return m_InputIds.size(); }

◆ GetNumLayers()

size_t GetNumLayers ( ) const
inline

◆ GetNumOutputs()

size_t GetNumOutputs ( ) const
inline

Definition at line 184 of file Graph.hpp.

Referenced by Graph::OutputLayersAccessor::begin(), LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::Execute().

184 { return m_OutputIds.size(); }

◆ GetOutputLayers()

OutputLayersAccessor GetOutputLayers ( ) const
inline

Returns a wrapper object with begin(), end() methods to iterate over the output layers in a range-based for loop.

Definition at line 192 of file Graph.hpp.

Referenced by LoadedNetwork::EnqueueWorkload(), and LoadedNetwork::ImportOutputs().

192 { return OutputLayersAccessor(*this); }

◆ GetPosInGraph()

Graph::Iterator GetPosInGraph ( Layer & layer)
inline

Gets the position of a layer in the graph.

Definition at line 409 of file Graph.hpp.

References ARMNN_ASSERT.

Referenced by Graph::DetachObservable(), Graph::EraseLayer(), Graph::InsertNewLayer(), and Optimizer::Pass().

410 {
411  auto it = m_PosInGraphMap.find(&layer);
412  ARMNN_ASSERT(it != m_PosInGraphMap.end());
413  return it->second;
414 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ GetProfiler()

const std::shared_ptr< IProfiler > & GetProfiler ( ) const

Definition at line 641 of file Graph.cpp.

Referenced by Graph::DetachObservable().

642 {
643  return m_Profiler;
644 }

◆ InferTensorInfos()

void InferTensorInfos ( )

Definition at line 558 of file Graph.cpp.

References armnn::Convolution3d, armnn::FullyConnected, InputSlot::GetConnectedOutputSlot(), Layer::GetInputSlot(), armnn::GetLayerTypeAsCString(), Layer::GetName(), Layer::GetNumInputSlots(), Layer::GetType(), IOutputSlot::IsTensorInfoSet(), Graph::TopologicalSort(), and armnn::ValidateOnly.

Referenced by Graph::GetNumLayers(), armnn::Optimize(), PreluValidateTensorShapesFromInputsMatchTest(), PreluValidateTensorShapesFromInputsNoMatchTest(), StackValidateTensorShapesFromInputsMatchTest(), StackValidateTensorShapesFromInputsNoMatchTest(), and TEST_SUITE().

559 {
560  for (auto&& layer : TopologicalSort())
561  {
562  for (auto&& input : layer->GetInputSlots())
563  {
564  const IOutputSlot* source = input.GetConnectedOutputSlot();
565  if (source == NULL)
566  {
567  // Throws exception due to a layer input not being connected to an output slot.
568  // Verifies input slot weights and bias are set for FullyConnected layers.
569  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
570  }
571 
572  if (!source->IsTensorInfoSet())
573  {
574  std::ostringstream message;
575  message << "Output slot TensorInfo not set on "
576  << GetLayerTypeAsCString(layer->GetType())
577  << " layer "
578  << std::quoted(layer->GetName());
579  throw LayerValidationException(message.str());
580  }
581  }
582 
583  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
584  {
585  layer->ValidateTensorShapesFromInputs();
586  }
587  }
588 }
Validate all output shapes.
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:180
const char * GetLayerTypeAsCString(LayerType type)

◆ InsertNewLayer() [1/2]

LayerT * InsertNewLayer ( InputSlot & insertBefore,
Args &&...  args 
)
inline

Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itself.

Definition at line 431 of file Graph.hpp.

References InputSlot::GetConnectedOutputSlot(), InputSlot::GetOwningLayer(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), InputSlot::Insert(), and armnn::LayerAdded.

Referenced by armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), MoveTransposeUpImpl::Run(), MovePermuteUpImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), AddBroadcastReshapeLayerImpl::Run(), TEST_SUITE(), and Graph::~Graph().

432 {
433  // Insert after the parent if any, or before the child otherwise, so the topological order is kept.
434  OutputSlot* parentOut = insertBefore.GetConnectedOutputSlot();
435  const Iterator pos = (parentOut != nullptr)
436  ? std::next(GetPosInGraph(parentOut->GetOwningLayer()))
437  : GetPosInGraph(insertBefore.GetOwningLayer());
438  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
439  insertBefore.Insert(*layer);
440 
441  NotifyObservables(GraphEvent::LayerAdded, layer);
442 
443  return layer;
444 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:51
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:409

◆ InsertNewLayer() [2/2]

LayerT * InsertNewLayer ( OutputSlot & insertAfter,
Args &&...  args 
)
inline

Inserts a new layer between insertAfter and the input slot(s) currently connected to it.

Definition at line 447 of file Graph.hpp.

References ARMNN_ASSERT, OutputSlot::Connect(), OutputSlot::GetOwningLayer(), Graph::GetPosInGraph(), armnn::LayerAdded, and OutputSlot::MoveAllConnections().

448 {
449  Layer& owningLayer = insertAfter.GetOwningLayer();
450 
451  const Iterator pos = std::next(GetPosInGraph(owningLayer));
452  LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
453 
454  ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
455 
456  insertAfter.MoveAllConnections(layer->GetOutputSlot());
457  insertAfter.Connect(layer->GetInputSlot(0));
458 
459  NotifyObservables(GraphEvent::LayerAdded, layer);
460 
461  return layer;
462 }
LayerList::const_iterator Iterator
Definition: Graph.hpp:51
Iterator GetPosInGraph(Layer &layer)
Gets the position of a layer in the graph.
Definition: Graph.hpp:409
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ operator=() [1/2]

Graph& operator= ( const Graph & other)
delete

Referenced by Graph::Graph().

◆ operator=() [2/2]

Graph& operator= ( Graph &&  other)
inline

Definition at line 112 of file Graph.hpp.

References ARMNN_ASSERT, Graph::ForEachLayer(), and Layer::Reparent().

113  {
114  m_InputIds = std::move(other.m_InputIds);
115  m_OutputIds = std::move(other.m_OutputIds);
116  m_LayersInOrder = std::move(other.m_LayersInOrder);
117  m_Views = std::move(other.m_Views);
118  m_Profiler = std::move(other.m_Profiler);
119 
120  other.ForEachLayer([this](Layer* otherLayer)
121  {
122  otherLayer->Reparent(*this, m_Layers.end());
123  });
124 
125  ARMNN_ASSERT(other.m_PosInGraphMap.empty());
126  ARMNN_ASSERT(other.m_Layers.empty());
127 
128  return *this;
129  }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ Print()

Status Print ( ) const

Definition at line 62 of file Graph.cpp.

References ARMNN_LOG, armnn::GetLayerTypeAsCString(), Layer::GetOutputSlots(), armnn::info, armnn::Success, and Graph::TopologicalSort().

Referenced by CheckOrder(), and Graph::~Graph().

63 {
64  if (m_Layers.empty())
65  {
66  ARMNN_LOG(info) << "\n Graph is empty.\n";
67  return Status::Success;
68  }
69  ARMNN_LOG(info) << "\n";
70  ARMNN_LOG(info) << "Walking Pattern: \n";
71 
72  for (auto&& it : TopologicalSort())
73  {
74  auto numInputSlots = it->GetNumInputSlots();
75  auto numOutputSlots = it->GetNumOutputSlots();
76 
77  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
78  << ":" << it->GetBackendId().Get()
79  << " has " << numInputSlots << " input slots"
80  << " and " << numOutputSlots << " output slots.";
81 
82  for (auto i : it->GetInputSlots())
83  {
84  std::ostringstream message;
85  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
86  unsigned int numDims = inputTensorShape.GetNumDimensions();
87 
88  message << "The input slot has shape [ ";
89  for (unsigned int dim=0; dim < numDims; dim++)
90  {
91  message << inputTensorShape[dim] << ",";
92  }
93  message << " ]";
94  ARMNN_LOG(info) << message.str();
95  }
96 
97  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
98  {
99  const armnn::Layer *layer = it;
100  std::ostringstream message;
101  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
102  unsigned int numDims = outputTensorShape.GetNumDimensions();
103 
104  message << "The output slot has shape [ ";
105  for (unsigned int dim=0; dim < numDims; dim++)
106  {
107  message << outputTensorShape[dim] << ",";
108  }
109  message << " ]";
110  ARMNN_LOG(info) << message.str();
111  }
112  ARMNN_LOG(info) << "\n";
113  }
114  ARMNN_LOG(info) << "\n\n";
115 
116  return Status::Success;
117 }
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:238
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:180
const char * GetLayerTypeAsCString(LayerType type)

◆ PtrCast()

static LayerType* PtrCast ( Layer *const  layer)
inlinestatic

Definition at line 34 of file Graph.hpp.

// Downcasts a Layer* to the requested LayerType*; PolymorphicDowncast
// performs the conversion (checked in debug builds elsewhere in ArmNN).
35  {
36  return PolymorphicDowncast<LayerType*>(layer);
37  }

◆ SerializeToDot()

Status SerializeToDot ( std::ostream &  stream)

Definition at line 119 of file Graph.cpp.

References DotAttributeSet::AddAttribute(), NodeContent::AddContent(), armnn::Failure, DotEdge::GetAttributeSet(), DotDefaults::GetAttributeSet(), DotNode::GetContents(), Layer::GetGuid(), armnn::GetLayerTypeAsCString(), OutputSlot::GetOwningLayer(), TensorInfo::GetShape(), OutputSlot::GetTensorInfo(), and armnn::Success.

Referenced by TEST_SUITE(), and Graph::~Graph().

// Writes the graph to `stream` in Graphviz DOT format: one node per layer
// (labelled with its serialized parameters) and one edge per connection
// (labelled with the producing slot's tensor shape). Returns Failure only
// if the stream reports a write error afterwards.
120 {
// Inner scope so DotGraph/DotDefaults emit their closing syntax before
// the stream state is checked below.
121  {
122  DotGraph graph(stream, "Optimized");
123 
124  {
125  // Default node attributes:
126  DotDefaults nodes(stream, "node");
127  nodes.GetAttributeSet()
128  .AddAttribute("shape", "record");
129  }
130 
131  {
132  // Default edge attributes:
133  DotDefaults edges(stream, "edge");
134  edges.GetAttributeSet()
135  .AddAttribute("fontsize", 8)
136  .AddAttribute("fontcolor", "blue")
137  .AddAttribute("fontname", "arial-bold");
138  }
139 
140  // First declares the nodes.
141  for (auto&& layer : m_Layers)
142  {
// Nodes are keyed by the layer's GUID so edges can reference them below.
143  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
144  // Extracts the layer parameters.
145  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
146  node.GetContents().AddContent(name + " : " + value);
147  };
148  layer->SerializeLayerParameters(extractParams);
149  }
150 
151  // Second declares the edges.
152  for (auto&& layer : m_Layers)
153  {
154  LayerGuid toId = layer->GetGuid();
155 
// One edge per input slot, from the producing layer's GUID to this layer.
156  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
157  {
158  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
159  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
160  DotEdge edge(stream, fromId, toId);
161 
162  // Now print the tensor shape on the edge.
163  {
164  // Constructs the label attribute with HTML markup.
165  std::stringstream ss;
166  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
167  edge.GetAttributeSet().AddAttribute("label", ss);
168  }
169  }
170  }
171  }
172 
// All DOT writers have been destroyed; check the stream for write errors.
173  if (stream.bad())
174  {
175  return Status::Failure;
176  }
177  return Status::Success;
178 }
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:349
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const char * GetLayerTypeAsCString(LayerType type)

◆ SubstituteSubgraph() [1/2]

void SubstituteSubgraph ( SubgraphView subgraph,
IConnectableLayer substituteLayer 
)

Substitutes the given sub-graph with either a new layer or a new sub-graph.

In either case, the given layer or all the layers in the given sub-graph must belong to this graph.

Definition at line 434 of file Graph.cpp.

References ARMNN_ASSERT.

Referenced by armnn::ApplyBackendOptimizations(), Graph::GetNumLayers(), and TEST_SUITE().

// Replaces `subgraph` with the single `substituteLayer`: rewires the
// subgraph's boundary connections onto the new layer, then erases the
// subgraph's layers from this graph.
435 {
436  ARMNN_ASSERT(substituteLayer != nullptr);
437 
438  ReplaceSubgraphConnections(subgraph, substituteLayer);
439  EraseSubgraphLayers(subgraph);
440 }
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14

◆ SubstituteSubgraph() [2/2]

void SubstituteSubgraph ( SubgraphView subgraph,
const SubgraphView substituteSubgraph 
)

Definition at line 442 of file Graph.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, SubgraphView::Clear(), IOutputSlot::Connect(), IOutputSlot::Disconnect(), Graph::EraseLayer(), SubgraphView::ForEachLayer(), InputSlot::GetConnection(), SubgraphView::GetInputSlots(), SubgraphView::GetLayers(), SubgraphView::GetOutputSlots(), armnn::IgnoreUnused(), OutputSlot::MoveAllConnections(), armnn::numeric_cast(), Layer::Reparent(), and Graph::TopologicalSort().

// Replaces `subgraph` with the layers of `substituteSubgraph`: adopts any
// replacement layers not already in this graph, rewires the boundary
// connections, erases the old layers, and re-sorts the graph.
443 {
444  // Look through each layer in the new subgraph and add any that are not already a member of this graph
445  substituteSubgraph.ForEachLayer([this](Layer* layer)
446  {
447  if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
448  {
// Reparent moves the layer into this graph (appended at the end);
// the order is now stale, so flag it for re-sorting.
449  layer->Reparent(*this, m_Layers.end());
450  m_LayersInOrder = false;
451  }
452  });
453 
454  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
455  EraseSubgraphLayers(subgraph);
// Restore topological order after the structural change.
456  TopologicalSort();
457  }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:180

◆ TopologicalSort() [1/2]

Graph& TopologicalSort ( )
inline

Sorts layers in topological order and return this.

Definition at line 180 of file Graph.hpp.

References Graph::TopologicalSort().

Referenced by CheckOrder(), LoadedNetwork::ImportInputs(), LoadedNetwork::ImportOutputs(), Graph::InferTensorInfos(), LoadedNetwork::MakeLoadedNetwork(), Optimizer::Pass(), Graph::Print(), LoadedNetwork::RegisterDebugCallback(), LoadedNetwork::SendNetworkStructure(), Graph::SubstituteSubgraph(), TEST_SUITE(), Graph::TopologicalSort(), and Graph::VerifyConstantLayerSetTensorInfo().

// Non-const overload: delegates the sort to the const overload, then
// returns *this as a mutable reference for call chaining.
180 { const_cast<const Graph*>(this)->TopologicalSort(); return *this; }
Graph(bool shapeInferenceMethod=false)
Definition: Graph.hpp:96
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:180

◆ TopologicalSort() [2/2]

const Graph & TopologicalSort ( ) const

Definition at line 279 of file Graph.cpp.

// Lazily sorts m_Layers into topological order: a no-op when
// m_LayersInOrder is already true; otherwise recomputes each layer's
// priority and sorts the list by it.
280 {
281  if (!m_LayersInOrder)
282  {
283  // Resets layer order.
284  for (auto&& it : m_Layers)
285  {
286  it->ResetPriority();
287  }
288 
// Lower priority value sorts earlier (ascending order).
289  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
290  {
291  return layerA->GetPriority() < layerB->GetPriority();
292  };
293 
294  m_Layers.sort(compareLayerPriority);
295 
// Cache the result so repeated calls are cheap until the graph mutates.
296  m_LayersInOrder = true;
297  }
298 
299  return *this;
300 }

◆ VerifyConstantLayerSetTensorInfo()

void VerifyConstantLayerSetTensorInfo ( ) const

For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.

LayerValidationException thrown if no TensorInfo is set.

Exceptions
LayerValidationException

Definition at line 535 of file Graph.cpp.

References armnn::Constant, armnn::GetLayerTypeAsCString(), and Graph::TopologicalSort().

Referenced by Graph::GetNumLayers().

// Walks every Constant layer (in topological order) and throws
// LayerValidationException naming the offending layer if any of its
// output slots lacks a TensorInfo.
536 {
537  for (auto&& layer : TopologicalSort())
538  {
// Only Constant layers are checked; all other layer types are skipped.
539  if (layer->GetType() == armnn::LayerType::Constant)
540  {
541  for (auto&& output: layer->GetOutputSlots())
542  {
543  if (!output.IsTensorInfoSet())
544  {
// Build a diagnostic that identifies the layer by type and name.
545  std::ostringstream message;
546  message << "Output slot TensorInfo not set on "
547  << GetLayerTypeAsCString(layer->GetType())
548  << " layer \""
549  << layer->GetName()
550  << "\"";
551  throw LayerValidationException(message.str());
552  }
553  }
554  }
555  }
556 }
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:180
const char * GetLayerTypeAsCString(LayerType type)

The documentation for this class was generated from the following files: