//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// Graph.cpp (ArmNN 22.08)
//
5 
#include <Graph.hpp>
#include <LayersFwd.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>

#include <fmt/format.h>

#include <DotSerializer.hpp>

#include <algorithm>
#include <iomanip>
#include <sstream>
#include <unordered_map>
#include <unordered_set>
23 
24 namespace armnn
25 {
26 
27 Graph::Graph(const Graph& other)
28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55  outputSlot->Connect(inputSlot);
56  }
57  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
58  ++outputSlot;
59  }
60  }
61 }
62 
64 {
65  if (m_Layers.empty())
66  {
67  ARMNN_LOG(info) << "\n Graph is empty.\n";
68  return Status::Success;
69  }
70  ARMNN_LOG(info) << "\n";
71  ARMNN_LOG(info) << "Walking Pattern: \n";
72 
73  for (auto&& it : TopologicalSort())
74  {
75  auto numInputSlots = it->GetNumInputSlots();
76  auto numOutputSlots = it->GetNumOutputSlots();
77 
78  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
79  << ":" << it->GetBackendId().Get()
80  << " has " << numInputSlots << " input slots"
81  << " and " << numOutputSlots << " output slots.";
82 
83  for (auto i : it->GetInputSlots())
84  {
85  std::ostringstream message;
86  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
87  unsigned int numDims = inputTensorShape.GetNumDimensions();
88 
89  message << "The input slot has shape [ ";
90  for (unsigned int dim=0; dim < numDims; dim++)
91  {
92  message << inputTensorShape[dim] << ",";
93  }
94  message << " ]";
95  ARMNN_LOG(info) << message.str();
96  }
97 
98  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
99  {
100  const armnn::Layer *layer = it;
101  std::ostringstream message;
102  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
103  unsigned int numDims = outputTensorShape.GetNumDimensions();
104 
105  message << "The output slot has shape [ ";
106  for (unsigned int dim=0; dim < numDims; dim++)
107  {
108  message << outputTensorShape[dim] << ",";
109  }
110  message << " ]";
111  ARMNN_LOG(info) << message.str();
112  }
113  ARMNN_LOG(info) << "\n";
114  }
115  ARMNN_LOG(info) << "\n\n";
116 
117  return Status::Success;
118 }
119 
/// Writes the graph to 'stream' in Graphviz DOT format: one node per layer
/// (annotated with the layer's serialized parameters) and one edge per
/// connection, labelled with the tensor shape carried on that edge.
/// @param stream destination for the DOT text.
/// @return Status::Failure if the stream is in a bad state afterwards,
///         otherwise Status::Success.
Status Graph::SerializeToDot(std::ostream& stream)
{
    {
        // The nested scopes bound the lifetimes of the Dot* helper objects,
        // which hold the stream; keep the scoping intact so the DOT fragments
        // are emitted in the intended order.
        DotGraph graph(stream, "Optimized");

        {
            // Default node attributes:
            DotDefaults nodes(stream, "node");
            nodes.GetAttributeSet()
                 .AddAttribute("shape", "record");
        }

        {
            // Default edge attributes:
            DotDefaults edges(stream, "edge");
            edges.GetAttributeSet()
                 .AddAttribute("fontsize", 8)
                 .AddAttribute("fontcolor", "blue")
                 .AddAttribute("fontname", "arial-bold");
        }

        // First declares the nodes.
        for (auto&& layer : m_Layers)
        {
            DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
            // Extracts the layer parameters.
            ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
                node.GetContents().AddContent(name + " : " + value);
            };
            layer->SerializeLayerParameters(extractParams);
        }

        // Second declares the edges.
        for (auto&& layer : m_Layers)
        {
            LayerGuid toId = layer->GetGuid();

            for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
            {
                // NOTE(review): assumes every input slot is connected — a null
                // GetConnection() would be dereferenced below. Confirm callers
                // only serialize fully-connected graphs.
                OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
                LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
                DotEdge edge(stream, fromId, toId);

                // Now print the tensor shape on the edge.
                {
                    // Constructs the label attribute with HTML markup.
                    std::stringstream ss;
                    ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
                    edge.GetAttributeSet().AddAttribute("label", ss);
                }
            }
        }
    }

    if (stream.bad())
    {
        return Status::Failure;
    }
    return Status::Success;
}
180 
182 {
183  // Layers must be sorted in topological order
184  ARMNN_ASSERT(m_LayersInOrder);
185  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
186 
187  std::unordered_set<const ITensorHandle*> preallocatedTensors;
188  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
189 
190  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
191  // is a TensorHandle, the function just returns it
192  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
193  {
194  ITensorHandle* ancestor = subTensorHandle;
195  while (ancestor && ancestor->GetParent())
196  {
197  ancestor = ancestor->GetParent();
198  }
199  return ancestor;
200  };
201 
202  // Checks whether a TensorHandle has been pre-allocated
203  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
204  {
205  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
206  };
207 
208  // Constant tensor handles need to last from the beginning of execution till the end,
209  // therefore we pre-allocate them upfront
210  for (auto&& layer : m_Layers)
211  {
212  if (layer->GetType() == LayerType::Constant)
213  {
214  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
215  {
216  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
217 
218  if (tensorHandle && !IsPreallocated(tensorHandle))
219  {
220  tensorHandle->Allocate();
221  preallocatedTensors.insert(tensorHandle);
222  }
223  }
224  }
225  }
226 
227  // Iterate over the network in topological order
228  for (auto&& layer : m_Layers)
229  {
230  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
231  // The first time we encounter a new tensor handle, we start managing its lifetime.
232  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
233  {
234  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
235 
236  if (tensorHandle && !IsPreallocated(tensorHandle))
237  {
238  unsigned int numConnections = slot->GetNumConnections();
239  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
240  {
241  handleReferenceCounts[tensorHandle] = numConnections;
242  tensorHandle->Manage();
243  if (handleReferenceCounts[tensorHandle] == 0u)
244  {
245  // if nobody consumes this tensor we call Allocate()
246  tensorHandle->Allocate();
247  }
248  }
249  else
250  {
251  handleReferenceCounts[tensorHandle] += numConnections;
252  }
253  }
254  }
255 
256  // Loop through the input slots in the same layer and decrement the reference counter associated
257  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
258  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
259  {
260  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
261  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
262 
263  if (tensorHandle && !IsPreallocated(tensorHandle))
264  {
265  --handleReferenceCounts[tensorHandle];
266 
267  if (handleReferenceCounts[tensorHandle] == 0u)
268  {
269  // Stop managing lifetime of tensor handle
270  tensorHandle->Allocate();
271  handleReferenceCounts.erase(tensorHandle);
272  }
273  }
274  }
275  }
276 
277  return Status::Success;
278 }
279 
281 {
282  if (!m_LayersInOrder)
283  {
284  // Resets layer order.
285  for (auto&& it : m_Layers)
286  {
287  it->ResetPriority();
288  }
289 
290  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
291  {
292  return layerA->GetPriority() < layerB->GetPriority();
293  };
294 
295  m_Layers.sort(compareLayerPriority);
296 
297  m_LayersInOrder = true;
298  }
299 
300  return *this;
301 }
302 
303 void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
304  TensorHandleFactoryRegistry& registry)
305 {
306  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
307  // connections to other layers).
308  auto MayNeedCompatibilityLayer = [](const Layer& layer)
309  {
310  // All layers should have been associated with a valid compute device at this point.
311  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
312  // Does not need another compatibility layer if a copy or import layer is already present.
313  return layer.GetType() != LayerType::MemCopy &&
314  layer.GetType() != LayerType::MemImport;
315  };
316 
317  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
318  {
319  return strategy == EdgeStrategy::CopyToTarget ||
320  strategy == EdgeStrategy::ExportToTarget;
321  };
322 
323  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
324  {
325  ARMNN_ASSERT(srcLayer);
326 
327  if (!MayNeedCompatibilityLayer(*srcLayer))
328  {
329  // The current layer does not need copy layers, move to the next one
330  return;
331  }
332 
333  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
334  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
335  {
336  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
337  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
338  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
339  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
340  {
341  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
342  ARMNN_ASSERT(dstInputSlot);
343 
344  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
346  "Undefined memory strategy found while adding copy layers for compatibility");
347 
348  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
349  if (MayNeedCompatibilityLayer(dstLayer) &&
350  IsCompatibilityStrategy(strategy))
351  {
352  // A copy layer is needed in between the source and destination layers.
353  // Record the operation rather than attempting to modify the graph as we go.
354  // (invalidating iterators)
355  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
356  srcLayer->GetName(),
357  srcOutputIndex,
358  dstLayer.GetName(),
359  dstInputSlot->GetSlotIndex());
360  Layer* compLayer = nullptr;
361  if (strategy == EdgeStrategy::CopyToTarget)
362  {
363  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
364  }
365  else
366  {
367  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
368  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
369  }
370 
371  compLayer->SetBackendId(dstLayer.GetBackendId());
372 
373  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
374  auto backendIt = backends.find(dstLayer.GetBackendId());
375  if (backendIt != backends.end() &&
376  backendIt->second &&
377  backendIt->second->SupportsTensorAllocatorAPI())
378  {
379  auto backend = backendIt->second.get();
380  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
381  bool found = false;
382 
383  for (auto preference : tensorHandleFactoryIds)
384  {
385  auto factory = registry.GetFactory(preference);
386  if (factory)
387  {
388  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
389  auto srcFactory = registry.GetFactory(srcPref);
390 
391  if (srcFactory)
392  {
393  bool canExportImport =
394  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
395 
396  if (factory->SupportsMapUnmap() || canExportImport)
397  {
398  compOutputSlot.SetTensorHandleFactory(preference);
399  found = true;
400  break;
401  }
402  }
403  }
404  }
405 
406  if (!found)
407  {
409  }
410  }
411  else
412  {
414  }
415 
416  // The output strategy of a compatibility layer is always DirectCompatibility.
418 
419  // Recalculate the connection index on the previous layer as we have just inserted into it.
420  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
421  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
422  std::find(newSourceConnections.begin(),
423  newSourceConnections.end(),
424  &compLayer->GetInputSlot(0)));
425 
426  // The input strategy of a compatibility layer is always DirectCompatibilty.
427  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
429  }
430  }
431  }
432  });
433 }
434 
436 {
437  ARMNN_ASSERT(substituteLayer != nullptr);
438 
439  // Create a new sub-graph with only the given layer, using
440  // the given sub-graph as a reference of which parent graph to use
441  SubgraphView substituteSubgraph(substituteLayer);
442 
443  SubstituteSubgraph(subgraph, substituteSubgraph);
444 }
445 
446 void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
447 {
448  // Look through each layer in the new subgraph and add any that are not already a member of this graph
449  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
450  {
451  if (std::find(std::begin(m_Layers),
452  std::end(m_Layers),
453  iConnectableLayer) == std::end(m_Layers))
454  {
455  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
456  layer->Reparent(*this, m_Layers.end());
457  m_LayersInOrder = false;
458  }
459  });
460 
461  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
462  EraseSubgraphLayers(subgraph);
463  TopologicalSort();
464 }
465 
/// Rewires the boundary of 'subgraph' onto 'substituteSubgraph': each external
/// output slot feeding the sub-graph is disconnected and reconnected to the
/// corresponding substitute input slot, and all outgoing connections are moved
/// from the sub-graph's output slots to the substitute's.
/// Preconditions (asserted): the substitute is non-empty, all its layers are
/// already members of this graph, and both sub-graphs expose the same number
/// of input and output slots.
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
    ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
                     "New sub-graph used for substitution must not be empty");

    const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
    std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
    {
        // 'layer' is only used by the assert below; IgnoreUnused keeps
        // release builds (where ARMNN_ASSERT_MSG compiles out) warning-free.
        IgnoreUnused(layer);
        layer = PolymorphicDowncast<Layer*>(layer);
        ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                         "Substitute layer is not a member of graph");
    });

    const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();

    unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
    unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());

    const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();

    ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
    ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());

    // Disconnect the sub-graph and replace it with the substitute sub-graph

    // Step 1: process input slots
    for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
    {
        IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
        ARMNN_ASSERT(subgraphInputSlot);

        // Disconnect must happen before Connect so the external output slot's
        // connection list stays consistent — keep this statement order.
        IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
        ARMNN_ASSERT(connectedOutputSlot);
        connectedOutputSlot->Disconnect(*subgraphInputSlot);

        IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
        ARMNN_ASSERT(substituteInputSlot);
        connectedOutputSlot->Connect(*substituteInputSlot);
    }

    // Step 2: process output slots
    for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
    {
        auto subgraphOutputSlot =
            PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(subgraphOutputSlot);

        auto substituteOutputSlot =
            PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(substituteOutputSlot);

        subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
    }
}
523 
524 void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
525 {
526 
527  for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
528  {
529  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
530  EraseLayer(layer);
531  }
532  subgraph.Clear();
533 }
534 
535 /// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
536 /// LayerValidationException thrown if no TensorInfo is set.
537 ///
538 /// @throws LayerValidationException
540 {
541  for (auto&& layer : TopologicalSort())
542  {
543  if (layer->GetType() == armnn::LayerType::Constant)
544  {
545  for (auto&& output: layer->GetOutputSlots())
546  {
547  if (!output.IsTensorInfoSet())
548  {
549  std::ostringstream message;
550  message << "Output slot TensorInfo not set on "
551  << GetLayerTypeAsCString(layer->GetType())
552  << " layer \""
553  << layer->GetName()
554  << "\"";
555  throw LayerValidationException(message.str());
556  }
557  }
558  }
559  }
560 }
561 
563 {
564  for (auto&& layer : TopologicalSort())
565  {
566  for (auto&& input : layer->GetInputSlots())
567  {
568  const IOutputSlot* source = input.GetConnectedOutputSlot();
569  if (source == NULL)
570  {
571  // Throws exception due to a layer input not being connected to an output slot.
572  // Verifies input slot weights and bias are set for FullyConnected layers.
573  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
574  }
575 
576  if (!source->IsTensorInfoSet())
577  {
578  std::ostringstream message;
579  message << "Output slot TensorInfo not set on "
580  << GetLayerTypeAsCString(layer->GetType())
581  << " layer "
582  << std::quoted(layer->GetName());
583  throw LayerValidationException(message.str());
584  }
585  }
586 
587  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
588  {
589  layer->ValidateTensorShapesFromInputs();
590  }
591  }
592 }
593 
594 /// Throws exception due to a layer input not being connected to an output slot.
595 /// Verifies weights and bias are set for layers on input slots 1
596 /// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
597 ///
598 /// @param layer constant pointer to a Layer object
599 /// @param slotIndex input slot index of layer
600 /// @throws LayerValidationException
601 void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
602  unsigned int slotIndex)
603 {
604  std::ostringstream message;
605  bool noWeightsAndBias = false;
606 
607  if ((layer->GetType() == armnn::LayerType::FullyConnected ||
610  layer->GetType() == armnn::LayerType::DepthwiseConvolution2d) && slotIndex > 0)
611  {
612  message << std::endl;
613 
614  // If weights are not set and is bias enabled, also check if bias is set
615  if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
616  {
617  const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
618  if (biasSource == NULL)
619  {
620  message << "Weights and bias layers not set." << std::endl;
621  noWeightsAndBias = true;
622  }
623  }
624 
625  // Only weights or bias are not set
626  if (!noWeightsAndBias)
627  {
628  if (slotIndex == 1)
629  {
630  message << "Weights layer not set." << std::endl;
631  }
632  else
633  {
634  message << "Bias layer not set." << std::endl;
635  }
636  }
637  }
638 
639  std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
640  message << "Input slot(s) "
641  << slotString
642  << " for "
643  << GetLayerTypeAsCString(layer->GetType())
644  << " not connected to an output slot. " << std::endl
645  << "Layer name: "
646  << std::quoted(layer->GetName());
647  throw LayerValidationException(message.str());
648 }
649 
/// Accessor for the graph's profiler (shared; also propagated by the copy
/// constructor).
const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
{
    return m_Profiler;
}
654 
656 {
657  m_LayersInOrder = false;
658 }
659 
660 } // namespace armnn
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:132
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
NodeContent & AddContent(const std::string &content)
const IOutputSlots & GetIOutputSlots() const
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:210
No strategy has been defined. Used internally to verify integrity of optimizations.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:321
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
Status SerializeToDot(std::ostream &stream)
Definition: Graph.cpp:120
const IConnectableLayers & GetIConnectableLayers() const
const IInputSlots & GetIInputSlots() const
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:133
Layer & GetOwningLayer() const
Definition: Layer.hpp:119
Source backends tensor data can be exported to destination backend tensor without copy...
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:504
DotAttributeSet & GetAttributeSet()
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
unsigned int GetSlotIndex() const override
Definition: Layer.hpp:54
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
Destination backend can work directly with tensors on source backend.
The SubgraphView class represents a subgraph of a Graph.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:324
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
std::list< IConnectableLayer * > IConnectableLayers
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
NodeContent & GetContents()
An output connection slot for a layer.
Definition: INetwork.hpp:41
std::vector< IOutputSlot * > IOutputSlots
virtual void Disconnect(IInputSlot &slot)=0
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
Validate all output shapes.
void ForEachIConnectableLayer(Func func) const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:273
Status
enumeration
Definition: Types.hpp:42
std::vector< IInputSlot * > IInputSlots
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
void SetLayersOutOfOrder()
Definition: Graph.cpp:655
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
const BackendId & GetBackendId() const
Definition: Layer.hpp:277
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:246
void VerifyConstantLayerSetTensorInfo() const
For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
Definition: Graph.cpp:539
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:435
const std::shared_ptr< IProfiler > & GetProfiler() const
Definition: Graph.cpp:650
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:200
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:253
void InferTensorInfos()
Definition: Graph.cpp:562
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
virtual bool IsTensorInfoSet() const =0
virtual const IOutputSlot * GetConnection() const =0
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:326
DotAttributeSet & GetAttributeSet()
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:319
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
virtual int Connect(IInputSlot &destination)=0
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition: Graph.hpp:98
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition: Graph.cpp:181
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
const char * GetLayerTypeAsCString(LayerType type)
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:303
Status Print() const
Definition: Graph.cpp:63
static const FactoryId LegacyFactoryId
An input connection slot for a layer.
Definition: INetwork.hpp:25
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:330