// ArmNN 22.02 — Graph.cpp (source listing; extracted from the ArmNN API documentation)
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Graph.hpp>
#include <LayersFwd.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>

#include <fmt/format.h>

#include <DotSerializer.hpp>

#include <algorithm>
#include <iomanip>
#include <iterator>
#include <sstream>
#include <unordered_map>
#include <unordered_set>
23 
namespace armnn
{
26 
27 Graph::Graph(const Graph& other)
28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_Profiler(other.m_Profiler)
30 {
31  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
32 
33  for (auto&& otherLayer : other.m_Layers)
34  {
35  Layer* const layer = otherLayer->Clone(*this);
36  otherToClonedMap.emplace(otherLayer, layer);
37  }
38 
39  // Copies slot connections.
40  for (auto&& otherLayer : other.m_Layers)
41  {
42  Layer* const thisLayer = otherToClonedMap[otherLayer];
43 
44  auto outputSlot = thisLayer->BeginOutputSlots();
45  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
46  {
47  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
48  {
49  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
50  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
51 
52  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
53  outputSlot->Connect(inputSlot);
54  }
55  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
56  ++outputSlot;
57  }
58  }
59 }
60 
62 {
63  if (m_Layers.empty())
64  {
65  ARMNN_LOG(info) << "\n Graph is empty.\n";
66  return Status::Success;
67  }
68  ARMNN_LOG(info) << "\n";
69  ARMNN_LOG(info) << "Walking Pattern: \n";
70 
71  for (auto&& it : TopologicalSort())
72  {
73  auto numInputSlots = it->GetNumInputSlots();
74  auto numOutputSlots = it->GetNumOutputSlots();
75 
76  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
77  << ":" << it->GetBackendId().Get()
78  << " has " << numInputSlots << " input slots"
79  << " and " << numOutputSlots << " output slots.";
80 
81  for (auto i : it->GetInputSlots())
82  {
83  std::ostringstream message;
84  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
85  unsigned int numDims = inputTensorShape.GetNumDimensions();
86 
87  message << "The input slot has shape [ ";
88  for (unsigned int dim=0; dim < numDims; dim++)
89  {
90  message << inputTensorShape[dim] << ",";
91  }
92  message << " ]";
93  ARMNN_LOG(info) << message.str();
94  }
95 
96  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
97  {
98  const armnn::Layer *layer = it;
99  std::ostringstream message;
100  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
101  unsigned int numDims = outputTensorShape.GetNumDimensions();
102 
103  message << "The output slot has shape [ ";
104  for (unsigned int dim=0; dim < numDims; dim++)
105  {
106  message << outputTensorShape[dim] << ",";
107  }
108  message << " ]";
109  ARMNN_LOG(info) << message.str();
110  }
111  ARMNN_LOG(info) << "\n";
112  }
113  ARMNN_LOG(info) << "\n\n";
114 
115  return Status::Success;
116 }
117 
118 Status Graph::SerializeToDot(std::ostream& stream)
119 {
120  {
121  DotGraph graph(stream, "Optimized");
122 
123  {
124  // Default node attributes:
125  DotDefaults nodes(stream, "node");
126  nodes.GetAttributeSet()
127  .AddAttribute("shape", "record");
128  }
129 
130  {
131  // Default edge attributes:
132  DotDefaults edges(stream, "edge");
133  edges.GetAttributeSet()
134  .AddAttribute("fontsize", 8)
135  .AddAttribute("fontcolor", "blue")
136  .AddAttribute("fontname", "arial-bold");
137  }
138 
139  // First declares the nodes.
140  for (auto&& layer : m_Layers)
141  {
142  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
143  // Extracts the layer parameters.
144  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
145  node.GetContents().AddContent(name + " : " + value);
146  };
147  layer->SerializeLayerParameters(extractParams);
148  }
149 
150  // Second declares the edges.
151  for (auto&& layer : m_Layers)
152  {
153  LayerGuid toId = layer->GetGuid();
154 
155  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
156  {
157  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
158  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
159  DotEdge edge(stream, fromId, toId);
160 
161  // Now print the tensor shape on the edge.
162  {
163  // Constructs the label attribute with HTML markup.
164  std::stringstream ss;
165  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
166  edge.GetAttributeSet().AddAttribute("label", ss);
167  }
168  }
169  }
170  }
171 
172  if (stream.bad())
173  {
174  return Status::Failure;
175  }
176  return Status::Success;
177 }
178 
180 {
181  // Layers must be sorted in topological order
182  ARMNN_ASSERT(m_LayersInOrder);
183  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
184 
185  std::unordered_set<const ITensorHandle*> preallocatedTensors;
186  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
187 
188  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
189  // is a TensorHandle, the function just returns it
190  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
191  {
192  ITensorHandle* ancestor = subTensorHandle;
193  while (ancestor && ancestor->GetParent())
194  {
195  ancestor = ancestor->GetParent();
196  }
197  return ancestor;
198  };
199 
200  // Checks whether a TensorHandle has been pre-allocated
201  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
202  {
203  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
204  };
205 
206  // Constant tensor handles need to last from the beginning of execution till the end,
207  // therefore we pre-allocate them upfront
208  for (auto&& layer : m_Layers)
209  {
210  if (layer->GetType() == LayerType::Constant)
211  {
212  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
213  {
214  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
215 
216  if (tensorHandle && !IsPreallocated(tensorHandle))
217  {
218  tensorHandle->Allocate();
219  preallocatedTensors.insert(tensorHandle);
220  }
221  }
222  }
223  }
224 
225  // Iterate over the network in topological order
226  for (auto&& layer : m_Layers)
227  {
228  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
229  // The first time we encounter a new tensor handle, we start managing its lifetime.
230  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
231  {
232  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
233 
234  if (tensorHandle && !IsPreallocated(tensorHandle))
235  {
236  unsigned int numConnections = slot->GetNumConnections();
237  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
238  {
239  handleReferenceCounts[tensorHandle] = numConnections;
240  tensorHandle->Manage();
241  if (handleReferenceCounts[tensorHandle] == 0u)
242  {
243  // if nobody consumes this tensor we call Allocate()
244  tensorHandle->Allocate();
245  }
246  }
247  else
248  {
249  handleReferenceCounts[tensorHandle] += numConnections;
250  }
251  }
252  }
253 
254  // Loop through the input slots in the same layer and decrement the reference counter associated
255  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
256  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
257  {
258  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
259  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
260 
261  if (tensorHandle && !IsPreallocated(tensorHandle))
262  {
263  --handleReferenceCounts[tensorHandle];
264 
265  if (handleReferenceCounts[tensorHandle] == 0u)
266  {
267  // Stop managing lifetime of tensor handle
268  tensorHandle->Allocate();
269  handleReferenceCounts.erase(tensorHandle);
270  }
271  }
272  }
273  }
274 
275  return Status::Success;
276 }
277 
279 {
280  if (!m_LayersInOrder)
281  {
282  // Resets layer order.
283  for (auto&& it : m_Layers)
284  {
285  it->ResetPriority();
286  }
287 
288  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
289  {
290  return layerA->GetPriority() < layerB->GetPriority();
291  };
292 
293  m_Layers.sort(compareLayerPriority);
294 
295  m_LayersInOrder = true;
296  }
297 
298  return *this;
299 }
300 
301 void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
302  TensorHandleFactoryRegistry& registry)
303 {
304  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
305  // connections to other layers).
306  auto MayNeedCompatibilityLayer = [](const Layer& layer)
307  {
308  // All layers should have been associated with a valid compute device at this point.
309  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
310  // Does not need another compatibility layer if a copy or import layer is already present.
311  return layer.GetType() != LayerType::MemCopy &&
312  layer.GetType() != LayerType::MemImport;
313  };
314 
315  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
316  {
317  return strategy == EdgeStrategy::CopyToTarget ||
318  strategy == EdgeStrategy::ExportToTarget;
319  };
320 
321  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
322  {
323  ARMNN_ASSERT(srcLayer);
324 
325  if (!MayNeedCompatibilityLayer(*srcLayer))
326  {
327  // The current layer does not need copy layers, move to the next one
328  return;
329  }
330 
331  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
332  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
333  {
334  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
335  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
336  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
337  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
338  {
339  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
340  ARMNN_ASSERT(dstInputSlot);
341 
342  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
344  "Undefined memory strategy found while adding copy layers for compatibility");
345 
346  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
347  if (MayNeedCompatibilityLayer(dstLayer) &&
348  IsCompatibilityStrategy(strategy))
349  {
350  // A copy layer is needed in between the source and destination layers.
351  // Record the operation rather than attempting to modify the graph as we go.
352  // (invalidating iterators)
353  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
354  srcLayer->GetName(),
355  srcOutputIndex,
356  dstLayer.GetName(),
357  dstInputSlot->GetSlotIndex());
358  Layer* compLayer = nullptr;
359  if (strategy == EdgeStrategy::CopyToTarget)
360  {
361  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
362  }
363  else
364  {
365  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
366  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
367  }
368 
369  compLayer->SetBackendId(dstLayer.GetBackendId());
370 
371  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
372  auto backendIt = backends.find(dstLayer.GetBackendId());
373  if (backendIt != backends.end() &&
374  backendIt->second &&
375  backendIt->second->SupportsTensorAllocatorAPI())
376  {
377  auto backend = backendIt->second.get();
378  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
379  bool found = false;
380 
381  for (auto preference : tensorHandleFactoryIds)
382  {
383  auto factory = registry.GetFactory(preference);
384  if (factory)
385  {
386  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
387  auto srcFactory = registry.GetFactory(srcPref);
388 
389  if (srcFactory)
390  {
391  bool canExportImport =
392  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
393 
394  if (factory->SupportsMapUnmap() || canExportImport)
395  {
396  compOutputSlot.SetTensorHandleFactory(preference);
397  found = true;
398  break;
399  }
400  }
401  }
402  }
403 
404  if (!found)
405  {
407  }
408  }
409  else
410  {
412  }
413 
414  // The output strategy of a compatibility layer is always DirectCompatibility.
416 
417  // Recalculate the connection index on the previous layer as we have just inserted into it.
418  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
419  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
420  std::find(newSourceConnections.begin(),
421  newSourceConnections.end(),
422  &compLayer->GetInputSlot(0)));
423 
424  // The input strategy of a compatibility layer is always DirectCompatibilty.
425  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
427  }
428  }
429  }
430  });
431 }
432 
434 {
435  ARMNN_ASSERT(substituteLayer != nullptr);
436 
437  // Create a new sub-graph with only the given layer, using
438  // the given sub-graph as a reference of which parent graph to use
439  SubgraphView substituteSubgraph(substituteLayer);
440 
441  SubstituteSubgraph(subgraph, substituteSubgraph);
442 }
443 
444 void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
445 {
446  // Look through each layer in the new subgraph and add any that are not already a member of this graph
447  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
448  {
449  if (std::find(std::begin(m_Layers),
450  std::end(m_Layers),
451  iConnectableLayer) == std::end(m_Layers))
452  {
453  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
454  layer->Reparent(*this, m_Layers.end());
455  m_LayersInOrder = false;
456  }
457  });
458 
459  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
460  EraseSubgraphLayers(subgraph);
461  TopologicalSort();
462 }
463 
464 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
465 {
466  ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
467  "New sub-graph used for substitution must not be empty");
468 
469  const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
470  std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
471  {
472  IgnoreUnused(layer);
473  layer = PolymorphicDowncast<Layer*>(layer);
474  ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
475  "Substitute layer is not a member of graph");
476  });
477 
478  const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
479  const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();
480 
481  unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
482  unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());
483 
484  const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
485  const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();
486 
487  ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
488  ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
489 
490  // Disconnect the sub-graph and replace it with the substitute sub-graph
491 
492  // Step 1: process input slots
493  for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
494  {
495  IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
496  ARMNN_ASSERT(subgraphInputSlot);
497 
498  IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
499  ARMNN_ASSERT(connectedOutputSlot);
500  connectedOutputSlot->Disconnect(*subgraphInputSlot);
501 
502  IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
503  ARMNN_ASSERT(substituteInputSlot);
504  connectedOutputSlot->Connect(*substituteInputSlot);
505  }
506 
507  // Step 2: process output slots
508  for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
509  {
510  auto subgraphOutputSlot =
511  PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
512  ARMNN_ASSERT(subgraphOutputSlot);
513 
514  auto substituteOutputSlot =
515  PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
516  ARMNN_ASSERT(substituteOutputSlot);
517 
518  subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
519  }
520 }
521 
522 void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
523 {
524 
525  for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
526  {
527  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
528  EraseLayer(layer);
529  }
530  subgraph.Clear();
531 }
532 
533 /// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
534 /// LayerValidationException thrown if no TensorInfo is set.
535 ///
536 /// @throws LayerValidationException
538 {
539  for (auto&& layer : TopologicalSort())
540  {
541  if (layer->GetType() == armnn::LayerType::Constant)
542  {
543  for (auto&& output: layer->GetOutputSlots())
544  {
545  if (!output.IsTensorInfoSet())
546  {
547  std::ostringstream message;
548  message << "Output slot TensorInfo not set on "
549  << GetLayerTypeAsCString(layer->GetType())
550  << " layer \""
551  << layer->GetName()
552  << "\"";
553  throw LayerValidationException(message.str());
554  }
555  }
556  }
557  }
558 }
559 
561 {
562  for (auto&& layer : TopologicalSort())
563  {
564  for (auto&& input : layer->GetInputSlots())
565  {
566  const IOutputSlot* source = input.GetConnectedOutputSlot();
567  if (source == NULL)
568  {
569  // Throws exception due to a layer input not being connected to an output slot.
570  // Verifies input slot weights and bias are set for FullyConnected layers.
571  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
572  }
573 
574  if (!source->IsTensorInfoSet())
575  {
576  std::ostringstream message;
577  message << "Output slot TensorInfo not set on "
578  << GetLayerTypeAsCString(layer->GetType())
579  << " layer "
580  << std::quoted(layer->GetName());
581  throw LayerValidationException(message.str());
582  }
583  }
584 
585  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
586  {
587  layer->ValidateTensorShapesFromInputs();
588  }
589  }
590 }
591 
592 /// Throws exception due to a layer input not being connected to an output slot.
593 /// Verifies weights and bias are set for layers on input slots 1
594 /// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
595 ///
596 /// @param layer constant pointer to a Layer object
597 /// @param slotIndex input slot index of layer
598 /// @throws LayerValidationException
599 void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
600  unsigned int slotIndex)
601 {
602  std::ostringstream message;
603  bool noWeightsAndBias = false;
604 
605  if ((layer->GetType() == armnn::LayerType::FullyConnected ||
606  layer->GetType() == armnn::LayerType::Convolution3d) && slotIndex > 0)
607  {
608  // If weights are not set and is bias enabled, also check if bias is set
609  if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
610  {
611  const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
612  if (biasSource == NULL)
613  {
614  message << layer->GetName() << " layer weights and bias not set: ";
615  noWeightsAndBias = true;
616  }
617  }
618 
619  // Only weights or bias are not set
620  if (!noWeightsAndBias)
621  {
622  if (slotIndex == 1)
623  {
624  message << layer->GetName() << " layer weights not set: ";
625  }
626  else
627  {
628  message << layer->GetName() << " layer bias not set: ";
629  }
630  }
631  }
632 
633  std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
634  message << "Input slot(s) "
635  << slotString
636  << " not connected to an output slot on "
637  << GetLayerTypeAsCString(layer->GetType())
638  << " layer "
639  << std::quoted(layer->GetName());
640  throw LayerValidationException(message.str());
641 }
642 
643 const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
644 {
645  return m_Profiler;
646 }
647 
} // namespace armnn
Graph(bool shapeInferenceMethod=false)
Definition: Graph.hpp:98
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:130
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
NodeContent & AddContent(const std::string &content)
const IOutputSlots & GetIOutputSlots() const
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:184
No strategy has been defined. Used internally to verify integrity of optimizations.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:318
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
Status SerializeToDot(std::ostream &stream)
Definition: Graph.cpp:118
const IConnectableLayers & GetIConnectableLayers() const
const IInputSlots & GetIInputSlots() const
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:131
Layer & GetOwningLayer() const
Definition: Layer.hpp:118
Source backends tensor data can be exported to destination backend tensor without copy...
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:467
DotAttributeSet & GetAttributeSet()
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
Destination backend can work directly with tensors on source backend.
The SubgraphView class represents a subgraph of a Graph.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:321
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
std::list< IConnectableLayer * > IConnectableLayers
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
NodeContent & GetContents()
An output connection slot for a layer.
Definition: INetwork.hpp:40
std::vector< IOutputSlot * > IOutputSlots
unsigned int GetSlotIndex() const
Definition: Layer.hpp:54
virtual void Disconnect(IInputSlot &slot)=0
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
Validate all output shapes.
void ForEachIConnectableLayer(Func func) const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:270
Status
enumeration
Definition: Types.hpp:29
std::vector< IInputSlot * > IInputSlots
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
const BackendId & GetBackendId() const
Definition: Layer.hpp:274
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:243
void VerifyConstantLayerSetTensorInfo() const
For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
Definition: Graph.cpp:537
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:433
const std::shared_ptr< IProfiler > & GetProfiler() const
Definition: Graph.cpp:643
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:174
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:250
void InferTensorInfos()
Definition: Graph.cpp:560
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:363
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
virtual bool IsTensorInfoSet() const =0
virtual const IOutputSlot * GetConnection() const =0
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:323
DotAttributeSet & GetAttributeSet()
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:316
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:179
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:182
virtual int Connect(IInputSlot &destination)=0
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition: Graph.cpp:179
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:66
const char * GetLayerTypeAsCString(LayerType type)
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:301
Status Print() const
Definition: Graph.cpp:61
static const FactoryId LegacyFactoryId
An input connection slot for a layer.
Definition: INetwork.hpp:26
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:327