ArmNN
 21.11
Graph.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "Graph.hpp"
#include "SubgraphView.hpp"
#include "LayersFwd.hpp"

#include <armnn/backends/IBackendInternal.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <fmt/format.h>

#include <DotSerializer.hpp>

#include <sstream>
#include <unordered_map>
#include <unordered_set>
24 
25 namespace armnn
26 {
27 
28 Graph::Graph(const Graph& other)
29 : m_LayersInOrder(other.m_LayersInOrder)
30 , m_Profiler(other.m_Profiler)
31 {
32  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
33 
34  for (auto&& otherLayer : other.m_Layers)
35  {
36  Layer* const layer = otherLayer->Clone(*this);
37  otherToClonedMap.emplace(otherLayer, layer);
38  }
39 
40  // Copies slot connections.
41  for (auto&& otherLayer : other.m_Layers)
42  {
43  Layer* const thisLayer = otherToClonedMap[otherLayer];
44 
45  auto outputSlot = thisLayer->BeginOutputSlots();
46  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
47  {
48  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
49  {
50  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
51  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
52 
53  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
54  outputSlot->Connect(inputSlot);
55  }
56  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
57  ++outputSlot;
58  }
59  }
60 }
61 
63 {
64  if (m_Layers.empty())
65  {
66  ARMNN_LOG(info) << "\n Graph is empty.\n";
67  return Status::Success;
68  }
69  ARMNN_LOG(info) << "\n";
70  ARMNN_LOG(info) << "Walking Pattern: \n";
71 
72  for (auto&& it : TopologicalSort())
73  {
74  auto numInputSlots = it->GetNumInputSlots();
75  auto numOutputSlots = it->GetNumOutputSlots();
76 
77  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
78  << ":" << it->GetBackendId().Get()
79  << " has " << numInputSlots << " input slots"
80  << " and " << numOutputSlots << " output slots.";
81 
82  for (auto i : it->GetInputSlots())
83  {
84  std::ostringstream message;
85  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
86  unsigned int numDims = inputTensorShape.GetNumDimensions();
87 
88  message << "The input slot has shape [ ";
89  for (unsigned int dim=0; dim < numDims; dim++)
90  {
91  message << inputTensorShape[dim] << ",";
92  }
93  message << " ]";
94  ARMNN_LOG(info) << message.str();
95  }
96 
97  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
98  {
99  const armnn::Layer *layer = it;
100  std::ostringstream message;
101  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
102  unsigned int numDims = outputTensorShape.GetNumDimensions();
103 
104  message << "The output slot has shape [ ";
105  for (unsigned int dim=0; dim < numDims; dim++)
106  {
107  message << outputTensorShape[dim] << ",";
108  }
109  message << " ]";
110  ARMNN_LOG(info) << message.str();
111  }
112  ARMNN_LOG(info) << "\n";
113  }
114  ARMNN_LOG(info) << "\n\n";
115 
116  return Status::Success;
117 }
118 
119 Status Graph::SerializeToDot(std::ostream& stream)
120 {
121  {
122  DotGraph graph(stream, "Optimized");
123 
124  {
125  // Default node attributes:
126  DotDefaults nodes(stream, "node");
127  nodes.GetAttributeSet()
128  .AddAttribute("shape", "record");
129  }
130 
131  {
132  // Default edge attributes:
133  DotDefaults edges(stream, "edge");
134  edges.GetAttributeSet()
135  .AddAttribute("fontsize", 8)
136  .AddAttribute("fontcolor", "blue")
137  .AddAttribute("fontname", "arial-bold");
138  }
139 
140  // First declares the nodes.
141  for (auto&& layer : m_Layers)
142  {
143  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
144  // Extracts the layer parameters.
145  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
146  node.GetContents().AddContent(name + " : " + value);
147  };
148  layer->SerializeLayerParameters(extractParams);
149  }
150 
151  // Second declares the edges.
152  for (auto&& layer : m_Layers)
153  {
154  LayerGuid toId = layer->GetGuid();
155 
156  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
157  {
158  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
159  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
160  DotEdge edge(stream, fromId, toId);
161 
162  // Now print the tensor shape on the edge.
163  {
164  // Constructs the label attribute with HTML markup.
165  std::stringstream ss;
166  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
167  edge.GetAttributeSet().AddAttribute("label", ss);
168  }
169  }
170  }
171  }
172 
173  if (stream.bad())
174  {
175  return Status::Failure;
176  }
177  return Status::Success;
178 }
179 
181 {
182  // Layers must be sorted in topological order
183  ARMNN_ASSERT(m_LayersInOrder);
184  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
185 
186  std::unordered_set<const ITensorHandle*> preallocatedTensors;
187  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
188 
189  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
190  // is a TensorHandle, the function just returns it
191  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
192  {
193  ITensorHandle* ancestor = subTensorHandle;
194  while (ancestor && ancestor->GetParent())
195  {
196  ancestor = ancestor->GetParent();
197  }
198  return ancestor;
199  };
200 
201  // Checks whether a TensorHandle has been pre-allocated
202  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
203  {
204  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
205  };
206 
207  // Constant tensor handles need to last from the beginning of execution till the end,
208  // therefore we pre-allocate them upfront
209  for (auto&& layer : m_Layers)
210  {
211  if (layer->GetType() == LayerType::Constant)
212  {
213  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
214  {
215  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
216 
217  if (tensorHandle && !IsPreallocated(tensorHandle))
218  {
219  tensorHandle->Allocate();
220  preallocatedTensors.insert(tensorHandle);
221  }
222  }
223  }
224  }
225 
226  // Iterate over the network in topological order
227  for (auto&& layer : m_Layers)
228  {
229  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
230  // The first time we encounter a new tensor handle, we start managing its lifetime.
231  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
232  {
233  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
234 
235  if (tensorHandle && !IsPreallocated(tensorHandle))
236  {
237  unsigned int numConnections = slot->GetNumConnections();
238  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
239  {
240  handleReferenceCounts[tensorHandle] = numConnections;
241  tensorHandle->Manage();
242  if (handleReferenceCounts[tensorHandle] == 0u)
243  {
244  // if nobody consumes this tensor we call Allocate()
245  tensorHandle->Allocate();
246  }
247  }
248  else
249  {
250  handleReferenceCounts[tensorHandle] += numConnections;
251  }
252  }
253  }
254 
255  // Loop through the input slots in the same layer and decrement the reference counter associated
256  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
257  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
258  {
259  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
260  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
261 
262  if (tensorHandle && !IsPreallocated(tensorHandle))
263  {
264  --handleReferenceCounts[tensorHandle];
265 
266  if (handleReferenceCounts[tensorHandle] == 0u)
267  {
268  // Stop managing lifetime of tensor handle
269  tensorHandle->Allocate();
270  handleReferenceCounts.erase(tensorHandle);
271  }
272  }
273  }
274  }
275 
276  return Status::Success;
277 }
278 
280 {
281  if (!m_LayersInOrder)
282  {
283  // Resets layer order.
284  for (auto&& it : m_Layers)
285  {
286  it->ResetPriority();
287  }
288 
289  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
290  {
291  return layerA->GetPriority() < layerB->GetPriority();
292  };
293 
294  m_Layers.sort(compareLayerPriority);
295 
296  m_LayersInOrder = true;
297  }
298 
299  return *this;
300 }
301 
302 void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
303  TensorHandleFactoryRegistry& registry)
304 {
305  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
306  // connections to other layers).
307  auto MayNeedCompatibilityLayer = [](const Layer& layer)
308  {
309  // All layers should have been associated with a valid compute device at this point.
310  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
311  // Does not need another compatibility layer if a copy or import layer is already present.
312  return layer.GetType() != LayerType::MemCopy &&
313  layer.GetType() != LayerType::MemImport;
314  };
315 
316  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
317  {
318  return strategy == EdgeStrategy::CopyToTarget ||
319  strategy == EdgeStrategy::ExportToTarget;
320  };
321 
322  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
323  {
324  ARMNN_ASSERT(srcLayer);
325 
326  if (!MayNeedCompatibilityLayer(*srcLayer))
327  {
328  // The current layer does not need copy layers, move to the next one
329  return;
330  }
331 
332  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
333  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
334  {
335  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
336  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
337  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
338  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
339  {
340  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
341  ARMNN_ASSERT(dstInputSlot);
342 
343  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
345  "Undefined memory strategy found while adding copy layers for compatibility");
346 
347  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
348  if (MayNeedCompatibilityLayer(dstLayer) &&
349  IsCompatibilityStrategy(strategy))
350  {
351  // A copy layer is needed in between the source and destination layers.
352  // Record the operation rather than attempting to modify the graph as we go.
353  // (invalidating iterators)
354  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
355  srcLayer->GetName(),
356  srcOutputIndex,
357  dstLayer.GetName(),
358  dstInputSlot->GetSlotIndex());
359  Layer* compLayer = nullptr;
360  if (strategy == EdgeStrategy::CopyToTarget)
361  {
362  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
363  }
364  else
365  {
366  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
367  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
368  }
369 
370  compLayer->SetBackendId(dstLayer.GetBackendId());
371 
372  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
373  auto backendIt = backends.find(dstLayer.GetBackendId());
374  if (backendIt != backends.end() &&
375  backendIt->second &&
376  backendIt->second->SupportsTensorAllocatorAPI())
377  {
378  auto backend = backendIt->second.get();
379  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
380  bool found = false;
381 
382  for (auto preference : tensorHandleFactoryIds)
383  {
384  auto factory = registry.GetFactory(preference);
385  if (factory)
386  {
387  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
388  auto srcFactory = registry.GetFactory(srcPref);
389 
390  if (srcFactory)
391  {
392  bool canExportImport =
393  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
394 
395  if (factory->SupportsMapUnmap() || canExportImport)
396  {
397  compOutputSlot.SetTensorHandleFactory(preference);
398  found = true;
399  break;
400  }
401  }
402  }
403  }
404 
405  if (!found)
406  {
408  }
409  }
410  else
411  {
413  }
414 
415  // The output strategy of a compatibility layer is always DirectCompatibility.
417 
418  // Recalculate the connection index on the previous layer as we have just inserted into it.
419  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
420  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
421  std::find(newSourceConnections.begin(),
422  newSourceConnections.end(),
423  &compLayer->GetInputSlot(0)));
424 
425  // The input strategy of a compatibility layer is always DirectCompatibilty.
426  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
428  }
429  }
430  }
431  });
432 }
433 
435 {
436  ARMNN_ASSERT(substituteLayer != nullptr);
437 
438  ReplaceSubgraphConnections(subgraph, substituteLayer);
439  EraseSubgraphLayers(subgraph);
440 }
441 
442 void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
443 {
444  // Look through each layer in the new subgraph and add any that are not already a member of this graph
445  substituteSubgraph.ForEachLayer([this](Layer* layer)
446  {
447  if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
448  {
449  layer->Reparent(*this, m_Layers.end());
450  m_LayersInOrder = false;
451  }
452  });
453 
454  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
455  EraseSubgraphLayers(subgraph);
456  TopologicalSort();
457 }
458 
459 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, IConnectableLayer* substituteLayer)
460 {
461  ARMNN_ASSERT(substituteLayer != nullptr);
462 
463  // Create a new sub-graph with only the given layer, using
464  // the given sub-graph as a reference of which parent graph to use
465  SubgraphView substituteSubgraph(substituteLayer);
466  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
467 }
468 
469 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
470 {
471  ARMNN_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
472 
473  const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
474  std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
475  {
476  IgnoreUnused(layer);
477  ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
478  "Substitute layer is not a member of graph");
479  });
480 
481  const SubgraphView::InputSlots& subgraphInputSlots = subgraph.GetInputSlots();
482  const SubgraphView::OutputSlots& subgraphOutputSlots = subgraph.GetOutputSlots();
483 
484  unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
485  unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());
486 
487  const SubgraphView::InputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetInputSlots();
488  const SubgraphView::OutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetOutputSlots();
489 
490  ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
491  ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
492 
493  // Disconnect the sub-graph and replace it with the substitute sub-graph
494 
495  // Step 1: process input slots
496  for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
497  {
498  InputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
499  ARMNN_ASSERT(subgraphInputSlot);
500 
501  IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
502  ARMNN_ASSERT(connectedOutputSlot);
503  connectedOutputSlot->Disconnect(*subgraphInputSlot);
504 
505  IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
506  ARMNN_ASSERT(substituteInputSlot);
507  connectedOutputSlot->Connect(*substituteInputSlot);
508  }
509 
510  // Step 2: process output slots
511  for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
512  {
513  OutputSlot* subgraphOutputSlot = subgraphOutputSlots.at(outputSlotIdx);
514  ARMNN_ASSERT(subgraphOutputSlot);
515 
516  OutputSlot* substituteOutputSlot = substituteSubgraphOutputSlots.at(outputSlotIdx);
517  ARMNN_ASSERT(substituteOutputSlot);
518  subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
519  }
520 }
521 
522 void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
523 {
524  for (auto layer : subgraph.GetLayers())
525  {
526  EraseLayer(layer);
527  }
528  subgraph.Clear();
529 }
530 
531 /// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
532 /// LayerValidationException thrown if no TensorInfo is set.
533 ///
534 /// @throws LayerValidationException
536 {
537  for (auto&& layer : TopologicalSort())
538  {
539  if (layer->GetType() == armnn::LayerType::Constant)
540  {
541  for (auto&& output: layer->GetOutputSlots())
542  {
543  if (!output.IsTensorInfoSet())
544  {
545  std::ostringstream message;
546  message << "Output slot TensorInfo not set on "
547  << GetLayerTypeAsCString(layer->GetType())
548  << " layer \""
549  << layer->GetName()
550  << "\"";
551  throw LayerValidationException(message.str());
552  }
553  }
554  }
555  }
556 }
557 
559 {
560  for (auto&& layer : TopologicalSort())
561  {
562  for (auto&& input : layer->GetInputSlots())
563  {
564  const IOutputSlot* source = input.GetConnectedOutputSlot();
565  if (source == NULL)
566  {
567  // Throws exception due to a layer input not being connected to an output slot.
568  // Verifies input slot weights and bias are set for FullyConnected layers.
569  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
570  }
571 
572  if (!source->IsTensorInfoSet())
573  {
574  std::ostringstream message;
575  message << "Output slot TensorInfo not set on "
576  << GetLayerTypeAsCString(layer->GetType())
577  << " layer "
578  << std::quoted(layer->GetName());
579  throw LayerValidationException(message.str());
580  }
581  }
582 
583  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
584  {
585  layer->ValidateTensorShapesFromInputs();
586  }
587  }
588 }
589 
590 /// Throws exception due to a layer input not being connected to an output slot.
591 /// Verifies weights and bias are set for layers on input slots 1
592 /// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
593 ///
594 /// @param layer constant pointer to a Layer object
595 /// @param slotIndex input slot index of layer
596 /// @throws LayerValidationException
597 void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
598  unsigned int slotIndex)
599 {
600  std::ostringstream message;
601  bool noWeightsAndBias = false;
602 
603  if ((layer->GetType() == armnn::LayerType::FullyConnected ||
604  layer->GetType() == armnn::LayerType::Convolution3d) && slotIndex > 0)
605  {
606  // If weights are not set and is bias enabled, also check if bias is set
607  if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
608  {
609  const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
610  if (biasSource == NULL)
611  {
612  message << layer->GetName() << " layer weights and bias not set: ";
613  noWeightsAndBias = true;
614  }
615  }
616 
617  // Only weights or bias are not set
618  if (!noWeightsAndBias)
619  {
620  if (slotIndex == 1)
621  {
622  message << layer->GetName() << " layer weights not set: ";
623  }
624  else
625  {
626  message << layer->GetName() << " layer bias not set: ";
627  }
628  }
629  }
630 
631  std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
632  message << "Input slot(s) "
633  << slotString
634  << " not connected to an output slot on "
635  << GetLayerTypeAsCString(layer->GetType())
636  << " layer "
637  << std::quoted(layer->GetName());
638  throw LayerValidationException(message.str());
639 }
640 
641 const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
642 {
643  return m_Profiler;
644 }
645 
646 } // namespace armnn
Graph(bool shapeInferenceMethod=false)
Definition: Graph.hpp:96
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:125
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
NodeContent & AddContent(const std::string &content)
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:181
No strategy has been defined. Used internally to verify integrity of optimizations.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:313
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
Status SerializeToDot(std::ostream &stream)
Definition: Graph.cpp:119
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:126
virtual void Reparent(Graph &dest, std::list< Layer *>::const_iterator iterator)=0
Layer & GetOwningLayer() const
Definition: Layer.hpp:115
Source backends tensor data can be exported to destination backend tensor without copy...
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:464
std::vector< OutputSlot * > OutputSlots
DotAttributeSet & GetAttributeSet()
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:199
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
Destination backend can work directly with tensors on source backend.
The SubgraphView class represents a subgraph of a Graph.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
NodeContent & GetContents()
An output connection slot for a layer.
Definition: INetwork.hpp:37
unsigned int GetSlotIndex() const
Definition: Layer.hpp:53
virtual void Disconnect(IInputSlot &slot)=0
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
Validate all output shapes.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:265
Status
enumeration
Definition: Types.hpp:29
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:55
Layer & GetOwningLayer() const
Definition: Layer.hpp:52
const BackendId & GetBackendId() const
Definition: Layer.hpp:269
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:238
std::vector< InputSlot * > InputSlots
void VerifyConstantLayerSetTensorInfo() const
For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
Definition: Graph.cpp:535
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:434
const std::shared_ptr< IProfiler > & GetProfiler() const
Definition: Graph.cpp:641
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:171
const InputSlots & GetInputSlots() const
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:245
const OutputSlots & GetOutputSlots() const
void InferTensorInfos()
Definition: Graph.cpp:558
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:349
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
virtual bool IsTensorInfoSet() const =0
const Layers & GetLayers() const
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
DotAttributeSet & GetAttributeSet()
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:311
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:176
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:180
virtual int Connect(IInputSlot &destination)=0
void ForEachLayer(Func func) const
std::list< Layer * > Layers
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition: Graph.cpp:180
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
const char * GetLayerTypeAsCString(LayerType type)
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:302
Status Print() const
Definition: Graph.cpp:62
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
Definition: Layer.cpp:116
An input connection slot for a layer.
Definition: INetwork.hpp:24
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:322