// ArmNN 24.02 — Graph.cpp (extracted from the Doxygen "documentation of this file" page)
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include <Graph.hpp>
#include <LayersFwd.hpp>

// NOTE(review): the following three includes are named in this file's symbol
// cross-references but were dropped by the extraction — paths assumed from
// ArmNN convention; confirm against the repository.
#include <armnn/backends/IBackendInternal.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <fmt/format.h>

#include <DotSerializer.hpp>
#include <SubgraphView.hpp>

#include <iomanip>        // std::quoted (used by InferTensorInfos and error construction)
#include <sstream>
#include <unordered_map>
#include <unordered_set>  // used by AllocateDynamicBuffers
23 
24 namespace armnn
25 {
26 
27 Graph::Graph(const Graph& other)
28 : m_LayersInOrder(other.m_LayersInOrder)
29 , m_AllowExpandedDims(other.m_AllowExpandedDims)
30 , m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
31 , m_Profiler(other.m_Profiler)
32 {
33  std::unordered_map<const Layer*, Layer*> otherToClonedMap;
34 
35  for (auto&& otherLayer : other.m_Layers)
36  {
37  Layer* const layer = otherLayer->Clone(*this);
38  otherToClonedMap.emplace(otherLayer, layer);
39  }
40 
41  // Copies slot connections.
42  for (auto&& otherLayer : other.m_Layers)
43  {
44  Layer* const thisLayer = otherToClonedMap[otherLayer];
45 
46  auto outputSlot = thisLayer->BeginOutputSlots();
47  for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
48  {
49  for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
50  {
51  const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
52  Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];
53 
54  InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());
55 
56  if (otherInputSlot->IsTensorInfoOverridden())
57  {
58  inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
59  }
60  outputSlot->Connect(inputSlot);
61  }
62  outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
63  ++outputSlot;
64  }
65  }
66 }
67 
68 Status Graph::Print(bool extended) const
69 {
70  if (m_Layers.empty())
71  {
72  ARMNN_LOG(info) << "\n Graph is empty.\n";
73  return Status::Success;
74  }
75  ARMNN_LOG(info) << "\n";
76  ARMNN_LOG(info) << "Walking Pattern: \n";
77 
78  for (auto&& it : TopologicalSort())
79  {
80  auto numInputSlots = it->GetNumInputSlots();
81  auto numOutputSlots = it->GetNumOutputSlots();
82 
83  std::string guid;
84  if (extended)
85  {
86  guid += ":";
87  guid += std::to_string(it->GetGuid());
88  }
89  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
90  << ":" << it->GetBackendId().Get()
91  << guid
92  << " has " << numInputSlots << " input slots"
93  << " and " << numOutputSlots << " output slots.";
94 
95  for (auto i : it->GetInputSlots())
96  {
97  std::ostringstream message;
98  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
99  unsigned int numDims = inputTensorShape.GetNumDimensions();
100 
101  message << "The input slot has shape [ ";
102  for (unsigned int dim=0; dim < numDims; dim++)
103  {
104  message << inputTensorShape[dim] << ",";
105  }
106  message << " ]";
107  if (extended)
108  {
109  message << " Scale: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationScale();
110  message << " Offset: " << i.GetConnectedOutputSlot()->GetTensorInfo().GetQuantizationOffset();
111  message << " The input slot is connected to: ";
112  message << i.GetConnectedOutputSlot()->GetOwningIConnectableLayer().GetGuid();
113  }
114  ARMNN_LOG(info) << message.str();
115  }
116 
117  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
118  {
119  const armnn::Layer *layer = it;
120  std::ostringstream message;
121  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
122  unsigned int numDims = outputTensorShape.GetNumDimensions();
123 
124  message << "The output slot has shape [ ";
125  for (unsigned int dim=0; dim < numDims; dim++)
126  {
127  message << outputTensorShape[dim] << ",";
128  }
129  message << " ]";
130  if (extended)
131  {
132  message << " Scale: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationScale();
133  message << " Offset: " << layer->GetOutputSlots()[i].GetTensorInfo().GetQuantizationOffset();
134  message << " The output slot is connected to: ";
135  message << layer->GetOutputSlots()[i].GetConnection(0)->GetOwningIConnectableLayer().GetGuid();
136  }
137  ARMNN_LOG(info) << message.str();
138  }
139  ARMNN_LOG(info) << "\n";
140  }
141  ARMNN_LOG(info) << "\n\n";
142 
143  return Status::Success;
144 }
145 
146 Status Graph::SerializeToDot(std::ostream& stream)
147 {
148  {
149  DotGraph graph(stream, "Optimized");
150 
151  {
152  // Default node attributes:
153  DotDefaults nodes(stream, "node");
154  nodes.GetAttributeSet()
155  .AddAttribute("shape", "record");
156  }
157 
158  {
159  // Default edge attributes:
160  DotDefaults edges(stream, "edge");
161  edges.GetAttributeSet()
162  .AddAttribute("fontsize", 8)
163  .AddAttribute("fontcolor", "blue")
164  .AddAttribute("fontname", "arial-bold");
165  }
166 
167  // First declares the nodes.
168  for (auto&& layer : m_Layers)
169  {
170  DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
171  // Extracts the layer parameters.
172  ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
173  node.GetContents().AddContent(name + " : " + value);
174  };
175  layer->SerializeLayerParameters(extractParams);
176  }
177 
178  // Second declares the edges.
179  for (auto&& layer : m_Layers)
180  {
181  LayerGuid toId = layer->GetGuid();
182 
183  for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
184  {
185  OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
186  LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
187  DotEdge edge(stream, fromId, toId);
188 
189  // Now print the tensor shape on the edge.
190  {
191  // Constructs the label attribute with HTML markup.
192  std::stringstream ss;
193  ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
194  edge.GetAttributeSet().AddAttribute("label", ss);
195  }
196  }
197  }
198  }
199 
200  if (stream.bad())
201  {
202  return Status::Failure;
203  }
204  return Status::Success;
205 }
206 
208 {
209  // Layers must be sorted in topological order
210  ARMNN_ASSERT(m_LayersInOrder);
211  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
212 
213  std::unordered_set<const ITensorHandle*> preallocatedTensors;
214  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
215 
216  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
217  // is a TensorHandle, the function just returns it
218  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
219  {
220  ITensorHandle* ancestor = subTensorHandle;
221  while (ancestor && ancestor->GetParent())
222  {
223  ancestor = ancestor->GetParent();
224  }
225  return ancestor;
226  };
227 
228  // Checks whether a TensorHandle has been pre-allocated
229  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
230  {
231  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
232  };
233 
234  // Constant tensor handles need to last from the beginning of execution till the end,
235  // therefore we pre-allocate them upfront
236  for (auto&& layer : m_Layers)
237  {
238  if (layer->GetType() == LayerType::Constant)
239  {
240  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
241  {
242  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
243 
244  if (tensorHandle && !IsPreallocated(tensorHandle))
245  {
246  tensorHandle->Allocate();
247  preallocatedTensors.insert(tensorHandle);
248  }
249  }
250  }
251  }
252 
253  // Iterate over the network in topological order
254  for (auto&& layer : m_Layers)
255  {
256  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
257  // The first time we encounter a new tensor handle, we start managing its lifetime.
258  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
259  {
260  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
261 
262  if (tensorHandle && !IsPreallocated(tensorHandle))
263  {
264  unsigned int numConnections = slot->GetNumConnections();
265  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
266  {
267  handleReferenceCounts[tensorHandle] = numConnections;
268  tensorHandle->Manage();
269  if (handleReferenceCounts[tensorHandle] == 0u)
270  {
271  // if nobody consumes this tensor we call Allocate()
272  tensorHandle->Allocate();
273  }
274  }
275  else
276  {
277  handleReferenceCounts[tensorHandle] += numConnections;
278  }
279  }
280  }
281 
282  // Loop through the input slots in the same layer and decrement the reference counter associated
283  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
284  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
285  {
286  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
287  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
288 
289  if (tensorHandle && !IsPreallocated(tensorHandle))
290  {
291  --handleReferenceCounts[tensorHandle];
292 
293  if (handleReferenceCounts[tensorHandle] == 0u)
294  {
295  // Stop managing lifetime of tensor handle
296  tensorHandle->Allocate();
297  handleReferenceCounts.erase(tensorHandle);
298  }
299  }
300  }
301  }
302 
303  return Status::Success;
304 }
305 
306 const Graph& Graph::TopologicalSort() const
307 {
308  if (!m_LayersInOrder)
309  {
310  // Resets layer order.
311  for (auto&& it : m_Layers)
312  {
313  it->ResetPriority();
314  }
315 
316  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
317  {
318  return layerA->GetPriority() < layerB->GetPriority();
319  };
320 
321  m_Layers.sort(compareLayerPriority);
322 
323  m_LayersInOrder = true;
324  }
325 
326  return *this;
327 }
328 
329 void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
330  TensorHandleFactoryRegistry& registry)
331 {
332  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
333  // connections to other layers).
334  auto MayNeedCompatibilityLayer = [](const Layer& layer)
335  {
336  // All layers should have been associated with a valid compute device at this point.
337  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
338  // Does not need another compatibility layer if a copy or import layer is already present.
339  return layer.GetType() != LayerType::MemCopy &&
340  layer.GetType() != LayerType::MemImport;
341  };
342 
343  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
344  {
345  return strategy == EdgeStrategy::CopyToTarget ||
346  strategy == EdgeStrategy::ExportToTarget;
347  };
348 
349  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
350  {
351  ARMNN_ASSERT(srcLayer);
352 
353  if (!MayNeedCompatibilityLayer(*srcLayer))
354  {
355  // The current layer does not need copy layers, move to the next one
356  return;
357  }
358 
359  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
360  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
361  {
362  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
363  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
364  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
365  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
366  {
367  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
368  ARMNN_ASSERT(dstInputSlot);
369 
370  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
372  "Undefined memory strategy found while adding copy layers for compatibility");
373 
374  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
375  if (MayNeedCompatibilityLayer(dstLayer) &&
376  IsCompatibilityStrategy(strategy))
377  {
378  // A copy layer is needed in between the source and destination layers.
379  // Record the operation rather than attempting to modify the graph as we go.
380  // (invalidating iterators)
381  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
382  srcLayer->GetName(),
383  srcOutputIndex,
384  dstLayer.GetName(),
385  dstInputSlot->GetSlotIndex());
386  Layer* compLayer = nullptr;
387  if (strategy == EdgeStrategy::CopyToTarget)
388  {
389  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
390  }
391  else
392  {
393  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
394  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
395  }
396 
397  compLayer->SetBackendId(dstLayer.GetBackendId());
398 
399  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
400  auto backendIt = backends.find(dstLayer.GetBackendId());
401  if (backendIt != backends.end() &&
402  backendIt->second &&
403  backendIt->second->SupportsTensorAllocatorAPI())
404  {
405  auto backend = backendIt->second.get();
406  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
407  bool found = false;
408 
409  for (auto preference : tensorHandleFactoryIds)
410  {
411  auto factory = registry.GetFactory(preference);
412  if (factory)
413  {
414  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
415  auto srcFactory = registry.GetFactory(srcPref);
416 
417  if (srcFactory)
418  {
419  bool canExportImport =
420  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
421 
422  if (factory->SupportsMapUnmap() || canExportImport)
423  {
424  compOutputSlot.SetTensorHandleFactory(preference);
425  found = true;
426  break;
427  }
428  }
429  }
430  }
431 
432  if (!found)
433  {
435  }
436  }
437  else
438  {
440  }
441 
442  // The output strategy of a compatibility layer is always DirectCompatibility.
444 
445  // Recalculate the connection index on the previous layer as we have just inserted into it.
446  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
447  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
448  std::find(newSourceConnections.begin(),
449  newSourceConnections.end(),
450  &compLayer->GetInputSlot(0)));
451 
452  // The input strategy of a compatibility layer is always DirectCompatibilty.
453  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
455  }
456  }
457  }
458  });
459 }
460 
462 {
463  ARMNN_ASSERT(substituteLayer != nullptr);
464 
465  // Create a new sub-graph with only the given layer, using
466  // the given sub-graph as a reference of which parent graph to use
467  SubgraphView substituteSubgraph(substituteLayer);
468 
469  SubstituteSubgraph(subgraph, substituteSubgraph);
470 }
471 
472 void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
473 {
474  // Look through each layer in the new subgraph and add any that are not already a member of this graph
475  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
476  {
477  if (std::find(std::begin(m_Layers),
478  std::end(m_Layers),
479  iConnectableLayer) == std::end(m_Layers))
480  {
481  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
482  layer->Reparent(*this, m_Layers.end());
483  m_LayersInOrder = false;
484  }
485  });
486 
487  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
488  EraseSubgraphLayers(subgraph);
489  TopologicalSort();
490 }
491 
/// Disconnects the boundary slots of @p subgraph and reconnects them, index for
/// index, to the corresponding boundary slots of @p substituteSubgraph.
/// The two sub-graphs must expose the same number of input and output slots.
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
    ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
                     "New sub-graph used for substitution must not be empty");

    // Sanity check: every substitute layer must already belong to this graph
    // (the caller is expected to have reparented them beforehand).
    const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
    std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
    {
        // IgnoreUnused keeps release builds (where the assert compiles away) warning-free.
        IgnoreUnused(layer);
        layer = PolymorphicDowncast<Layer*>(layer);
        ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                         "Substitute layer is not a member of graph");
    });

    const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();

    unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
    unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());

    const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();

    // Boundary slot counts must match one-to-one for the rewiring below.
    ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
    ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());

    // Disconnect the sub-graph and replace it with the substitute sub-graph

    // Step 1: process input slots
    for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
    {
        IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
        ARMNN_ASSERT(subgraphInputSlot);

        // Only disconnect if the InputSlot has a connection, this might not be the case when
        // dealing with working copies of SubgraphViews
        // Note: we don't need this check for OutputSlot as it iterates over a vector of valid connections
        if (subgraphInputSlot->GetConnection())
        {
            IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
            InputSlot* inputSlot = PolymorphicDowncast<InputSlot*>(subgraphInputSlot);
            // Remember the override before disconnecting, so it can be replayed on the new slot.
            bool isOverridden = inputSlot->IsTensorInfoOverridden();

            ARMNN_ASSERT(connectedOutputSlot);
            connectedOutputSlot->Disconnect(*subgraphInputSlot);

            IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
            ARMNN_ASSERT(substituteInputSlot);
            connectedOutputSlot->Connect(*substituteInputSlot);

            // Replay any per-connection TensorInfo override on the substitute input slot.
            if (isOverridden)
            {
                TensorInfo overridden = inputSlot->GetTensorInfo();
                InputSlot* newInputSlot = PolymorphicDowncast<InputSlot*>(substituteInputSlot);
                newInputSlot->SetTensorInfo(overridden);
            }
        }
    }

    // Step 2: process output slots
    for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
    {
        auto subgraphOutputSlot =
            PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(subgraphOutputSlot);

        auto substituteOutputSlot =
            PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(substituteOutputSlot);

        // Move all downstream consumers from the old output slot to the substitute one.
        subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
    }
}
565 
566 void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
567 {
568 
569  for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
570  {
571  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
572  EraseLayer(layer);
573  }
574  subgraph.Clear();
575 }
576 
577 /// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
578 /// LayerValidationException thrown if no TensorInfo is set.
579 ///
580 /// @throws LayerValidationException
582 {
583  for (auto&& layer : TopologicalSort())
584  {
585  if (layer->GetType() == armnn::LayerType::Constant)
586  {
587  for (auto&& output: layer->GetOutputSlots())
588  {
589  if (!output.IsTensorInfoSet())
590  {
591  std::ostringstream message;
592  message << "Output slot TensorInfo not set on "
593  << GetLayerTypeAsCString(layer->GetType())
594  << " layer \""
595  << layer->GetName()
596  << "\"";
597  throw LayerValidationException(message.str());
598  }
599  }
600  }
601  }
602 }
603 
605 {
606  for (auto&& layer : TopologicalSort())
607  {
608  for (auto&& input : layer->GetInputSlots())
609  {
610  const IOutputSlot* source = input.GetConnectedOutputSlot();
611  if (source == NULL)
612  {
613  // Throws exception due to a layer input not being connected to an output slot.
614  // Verifies input slot weights and bias are set for FullyConnected layers.
615  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
616  }
617 
618  if (!source->IsTensorInfoSet())
619  {
620  std::ostringstream message;
621  message << "Output slot TensorInfo not set on "
622  << GetLayerTypeAsCString(layer->GetType())
623  << " layer "
624  << std::quoted(layer->GetName());
625  throw LayerValidationException(message.str());
626  }
627  }
628 
629  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
630  {
631  layer->ValidateTensorShapesFromInputs();
632  }
633  }
634 }
635 
636 /// Throws exception due to a layer input not being connected to an output slot.
637 /// Verifies weights and bias are set for layers on input slots 1
638 /// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
639 ///
640 /// @param layer constant pointer to a Layer object
641 /// @param slotIndex input slot index of layer
642 /// @throws LayerValidationException
643 void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
644  unsigned int slotIndex)
645 {
646  std::ostringstream message;
647  bool noWeightsAndBias = false;
648 
649  if ((layer->GetType() == armnn::LayerType::FullyConnected ||
652  layer->GetType() == armnn::LayerType::DepthwiseConvolution2d) && slotIndex > 0)
653  {
654  message << std::endl;
655 
656  // If weights are not set and is bias enabled, also check if bias is set
657  if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
658  {
659  const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
660  if (biasSource == NULL)
661  {
662  message << "Weights and bias layers not set." << std::endl;
663  noWeightsAndBias = true;
664  }
665  }
666 
667  // Only weights or bias are not set
668  if (!noWeightsAndBias)
669  {
670  if (slotIndex == 1)
671  {
672  message << "Weights layer not set." << std::endl;
673  }
674  else
675  {
676  message << "Bias layer not set." << std::endl;
677  }
678  }
679  }
680 
681  std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
682  message << "Input slot(s) "
683  << slotString
684  << " for "
685  << GetLayerTypeAsCString(layer->GetType())
686  << " not connected to an output slot. " << std::endl
687  << "Layer name: "
688  << std::quoted(layer->GetName());
689  throw LayerValidationException(message.str());
690 }
691 
/// Returns the profiler instance shared by this graph (copied from the source
/// graph on copy-construction).
const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
{
    return m_Profiler;
}
696 
698 {
699  m_LayersInOrder = false;
700 }
701 
702 } // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::OutputSlot::GetEdgeStrategies
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:146
armnn::Graph::SetLayersOutOfOrder
void SetLayersOutOfOrder()
Definition: Graph.cpp:697
armnn::Compute::Undefined
@ Undefined
armnn::OutputSlot::SetTensorHandleFactory
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:200
armnn::InputSlot::SetTensorInfo
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition: Layer.cpp:587
armnn::SubgraphView::IOutputSlots
std::vector< IOutputSlot * > IOutputSlots
Definition: SubgraphView.hpp:60
armnn::ITensorHandle::Manage
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
armnn::GetLayerTypeAsCString
const char * GetLayerTypeAsCString(LayerType type)
Definition: InternalTypes.cpp:13
armnn::Graph::AllocateDynamicBuffers
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition: Graph.cpp:207
armnn::DotAttributeSet::AddAttribute
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
Definition: DotSerializer.cpp:95
armnn::InputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
armnn::Graph::ForEachLayer
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
armnn::EdgeStrategy::DirectCompatibility
@ DirectCompatibility
No strategy has been defined. Used internally to verify integrity of optimizations.
armnn::Graph::EraseLayer
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:504
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
armnn::Graph::SubstituteSubgraph
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:461
TypesUtils.hpp
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::TensorInfo
Definition: Tensor.hpp:152
Graph.hpp
armnn::IOutputSlot::Disconnect
virtual void Disconnect(IInputSlot &slot)=0
BackendId.hpp
armnn::Graph::VerifyConstantLayerSetTensorInfo
void VerifyConstantLayerSetTensorInfo() const
For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
Definition: Graph.cpp:581
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
DotSerializer.hpp
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
NumericCast.hpp
armnn::DotEdge
Definition: DotSerializer.hpp:77
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::Layer
Definition: Layer.hpp:230
ARMNN_LOG
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
armnn::EdgeStrategy::CopyToTarget
@ CopyToTarget
Source backends tensor data can be exported to destination backend tensor without copy.
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:592
Assert.hpp
armnn::InputSlot::GetSlotIndex
unsigned int GetSlotIndex() const override
Definition: Layer.hpp:54
armnn::SubgraphView::IConnectableLayers
std::list< IConnectableLayer * > IConnectableLayers
Definition: SubgraphView.hpp:62
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:132
armnn::DotNode::GetContents
NodeContent & GetContents()
Definition: DotSerializer.hpp:107
armnn::SubgraphView::GetIConnectableLayers
const IConnectableLayers & GetIConnectableLayers() const
Definition: SubgraphView.cpp:278
armnn::EdgeStrategy::Undefined
@ Undefined
armnn::NodeContent::AddContent
NodeContent & AddContent(const std::string &content)
Definition: DotSerializer.cpp:147
Logging.hpp
armnn::DotGraph
Definition: DotSerializer.hpp:125
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
IBackendInternal.hpp
LayersFwd.hpp
armnn::ITensorHandle::GetParent
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::DotNode
Definition: DotSerializer.hpp:101
armnn::Graph::Print
Status Print(bool extended=false) const
Definition: Graph.cpp:68
armnn::SubgraphView::IInputSlots
std::vector< IInputSlot * > IInputSlots
Definition: SubgraphView.hpp:58
armnn::EdgeStrategy
EdgeStrategy
Definition: ITensorHandleFactory.hpp:104
armnn::Layer::GetGuid
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:343
armnn::ParameterStringifyFunction
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Definition: SerializeLayerParameters.hpp:14
armnn::Graph::GetProfiler
const std::shared_ptr< IProfiler > & GetProfiler() const
Definition: Graph.cpp:692
armnn::SubgraphView
The SubgraphView class represents a subgraph of a Graph.
Definition: SubgraphView.hpp:31
armnn::Status::Success
@ Success
armnn::Layer::GetOutputSlots
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:259
armnn::InputSlot::IsTensorInfoOverridden
bool IsTensorInfoOverridden() const override
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
Definition: Layer.cpp:609
armnn::Graph::InferTensorInfos
void InferTensorInfos()
Definition: Graph.cpp:604
armnn::BoostLogSeverityMapping::info
@ info
armnn::LayerType::MemImport
@ MemImport
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::Layer::GetNumInputSlots
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:334
armnn::InputSlot
Definition: Layer.hpp:42
armnn::ShapeInferenceMethod::ValidateOnly
@ ValidateOnly
Validate all output shapes.
armnn::LayerType::FullyConnected
@ FullyConnected
SubgraphView.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::Graph::AddCompatibilityLayers
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices,...
Definition: Graph.cpp:329
armnn::DotEdge::GetAttributeSet
DotAttributeSet & GetAttributeSet()
Definition: DotSerializer.hpp:83
armnn::Status
Status
Definition: Types.hpp:42
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::Layer::Clone
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
armnn::Layer::BeginOutputSlots
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:266
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::Graph::Graph
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition: Graph.hpp:98
armnn::Graph::TopologicalSort
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
armnn::SubgraphView::GetIInputSlots
const IInputSlots & GetIInputSlots() const
Definition: SubgraphView.cpp:233
armnn::EdgeStrategy::ExportToTarget
@ ExportToTarget
Destination backend can work directly with tensors on source backend.
armnn::OutputSlot::GetConnections
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:145
armnn::OutputSlot::SetEdgeStrategy
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:210
armnn::Graph::SerializeToDot
Status SerializeToDot(std::ostream &stream)
Definition: Graph.cpp:146
armnn::Layer::GetBackendId
const BackendId & GetBackendId() const
Definition: Layer.hpp:290
armnn::DotDefaults
Definition: DotSerializer.hpp:114
armnn::BackendId
Definition: BackendId.hpp:75
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::SubgraphView::ForEachIConnectableLayer
void ForEachIConnectableLayer(Func func) const
Definition: SubgraphView.hpp:46
armnn::Layer::SetBackendId
void SetBackendId(const BackendId &id) override
Set the backend of the IConnectableLayer.
Definition: Layer.hpp:291
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::DotDefaults::GetAttributeSet
DotAttributeSet & GetAttributeSet()
Definition: DotSerializer.hpp:120
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::Graph
Definition: Graph.hpp:30
armnn::SubgraphView::GetIOutputSlots
const IOutputSlots & GetIOutputSlots() const
Definition: SubgraphView.cpp:238
armnn::Status::Failure
@ Failure
armnn::ITensorHandle::Allocate
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
armnn::LayerType::Constant
@ Constant
armnn::IInputSlot::GetConnection
virtual const IOutputSlot * GetConnection() const =0