// ArmNN 23.08 — Graph.cpp
// (Doxygen page header: "Go to the documentation of this file.")
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include <Graph.hpp>
#include <LayersFwd.hpp>

#include <SubgraphView.hpp>

#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnn/backends/IBackendInternal.hpp>

#include <fmt/format.h>

#include <DotSerializer.hpp>

#include <sstream>
#include <unordered_map>
#include <unordered_set>
23 
24 namespace armnn
25 {
26 
/// Copy constructor: deep-copies the layer topology of @p other into this graph.
/// Layers are cloned first, then the slot connections between the clones are
/// re-established to mirror the connections of the source graph.
Graph::Graph(const Graph& other)
: m_LayersInOrder(other.m_LayersInOrder)
, m_AllowExpandedDims(other.m_AllowExpandedDims)
, m_ShapeInferenceMethod(other.m_ShapeInferenceMethod)
, m_Profiler(other.m_Profiler)
{
    // Maps each layer of the source graph to its clone in this graph.
    std::unordered_map<const Layer*, Layer*> otherToClonedMap;

    // First pass: clone every layer (connections are wired up in the second pass).
    for (auto&& otherLayer : other.m_Layers)
    {
        Layer* const layer = otherLayer->Clone(*this);
        otherToClonedMap.emplace(otherLayer, layer);
    }

    // Copies slot connections.
    for (auto&& otherLayer : other.m_Layers)
    {
        Layer* const thisLayer = otherToClonedMap[otherLayer];

        // Walk this clone's output slots in lock-step with the source layer's
        // output slots; the iterator is advanced once per source slot below.
        auto outputSlot = thisLayer->BeginOutputSlots();
        for (auto&& otherOutputSlot : otherLayer->GetOutputSlots())
        {
            for (auto&& otherInputSlot : otherOutputSlot.GetConnections())
            {
                const Layer& otherTgtLayer = otherInputSlot->GetOwningLayer();
                Layer* const thisTgtLayer = otherToClonedMap[&otherTgtLayer];

                InputSlot& inputSlot = thisTgtLayer->GetInputSlot(otherInputSlot->GetSlotIndex());

                // Preserve any per-input TensorInfo override from the source slot.
                if (otherInputSlot->IsTensorInfoOverridden())
                {
                    inputSlot.SetTensorInfo(otherInputSlot->GetTensorInfo());
                }
                outputSlot->Connect(inputSlot);
            }
            outputSlot->SetTensorInfo(otherOutputSlot.GetTensorInfo());
            ++outputSlot;
        }
    }
}
67 
69 {
70  if (m_Layers.empty())
71  {
72  ARMNN_LOG(info) << "\n Graph is empty.\n";
73  return Status::Success;
74  }
75  ARMNN_LOG(info) << "\n";
76  ARMNN_LOG(info) << "Walking Pattern: \n";
77 
78  for (auto&& it : TopologicalSort())
79  {
80  auto numInputSlots = it->GetNumInputSlots();
81  auto numOutputSlots = it->GetNumOutputSlots();
82 
83  ARMNN_LOG(info) << it->GetName() << ":" << GetLayerTypeAsCString(it->GetType())
84  << ":" << it->GetBackendId().Get()
85  << " has " << numInputSlots << " input slots"
86  << " and " << numOutputSlots << " output slots.";
87 
88  for (auto i : it->GetInputSlots())
89  {
90  std::ostringstream message;
91  auto inputTensorShape = i.GetConnectedOutputSlot()->GetTensorInfo().GetShape();
92  unsigned int numDims = inputTensorShape.GetNumDimensions();
93 
94  message << "The input slot has shape [ ";
95  for (unsigned int dim=0; dim < numDims; dim++)
96  {
97  message << inputTensorShape[dim] << ",";
98  }
99  message << " ]";
100  ARMNN_LOG(info) << message.str();
101  }
102 
103  for (unsigned int i = 0; i < it->GetNumOutputSlots(); i++)
104  {
105  const armnn::Layer *layer = it;
106  std::ostringstream message;
107  auto outputTensorShape = layer->GetOutputSlots()[i].GetTensorInfo().GetShape();
108  unsigned int numDims = outputTensorShape.GetNumDimensions();
109 
110  message << "The output slot has shape [ ";
111  for (unsigned int dim=0; dim < numDims; dim++)
112  {
113  message << outputTensorShape[dim] << ",";
114  }
115  message << " ]";
116  ARMNN_LOG(info) << message.str();
117  }
118  ARMNN_LOG(info) << "\n";
119  }
120  ARMNN_LOG(info) << "\n\n";
121 
122  return Status::Success;
123 }
124 
/// Writes the graph to @p stream in GraphViz DOT format: layers become nodes
/// (annotated with their serialized parameters), connections become edges
/// labelled with the producing tensor's shape.
/// @param stream output stream to receive the DOT text.
/// @return Status::Failure if the stream is in a bad state afterwards, otherwise Status::Success.
Status Graph::SerializeToDot(std::ostream& stream)
{
    // Inner scope: the Dot* helper objects appear to emit closing markup on
    // destruction, so they must be destroyed before stream.bad() is checked
    // below — TODO confirm against DotSerializer.hpp.
    {
        DotGraph graph(stream, "Optimized");

        {
            // Default node attributes:
            DotDefaults nodes(stream, "node");
            nodes.GetAttributeSet()
                 .AddAttribute("shape", "record");
        }

        {
            // Default edge attributes:
            DotDefaults edges(stream, "edge");
            edges.GetAttributeSet()
                 .AddAttribute("fontsize", 8)
                 .AddAttribute("fontcolor", "blue")
                 .AddAttribute("fontname", "arial-bold");
        }

        // First declares the nodes.
        for (auto&& layer : m_Layers)
        {
            DotNode node(stream, layer->GetGuid(), GetLayerTypeAsCString(layer->GetType()));
            // Extracts the layer parameters.
            ParameterStringifyFunction extractParams = [&node](const std::string & name, const std::string & value){
                node.GetContents().AddContent(name + " : " + value);
            };
            layer->SerializeLayerParameters(extractParams);
        }

        // Second declares the edges.
        for (auto&& layer : m_Layers)
        {
            LayerGuid toId = layer->GetGuid();

            for (unsigned int i=0;i<layer->GetNumInputSlots(); i++)
            {
                OutputSlot* outputSlot = static_cast<OutputSlot*>(layer->GetInputSlot(i).GetConnection());
                LayerGuid fromId = outputSlot->GetOwningLayer().GetGuid();
                DotEdge edge(stream, fromId, toId);

                // Now print the tensor shape on the edge.
                {
                    // Constructs the label attribute with HTML markup.
                    std::stringstream ss;
                    ss << "< " << outputSlot->GetTensorInfo().GetShape() << " >";
                    edge.GetAttributeSet().AddAttribute("label", ss);
                }
            }
        }
    }

    if (stream.bad())
    {
        return Status::Failure;
    }
    return Status::Success;
}
185 
187 {
188  // Layers must be sorted in topological order
189  ARMNN_ASSERT(m_LayersInOrder);
190  ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
191 
192  std::unordered_set<const ITensorHandle*> preallocatedTensors;
193  std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
194 
195  // Finds the first TensorHandle ancestor of a SubTensorHandle. If the ITensorHandle provided
196  // is a TensorHandle, the function just returns it
197  auto TraceSubTensorHandleAncestry = [](ITensorHandle* const subTensorHandle)
198  {
199  ITensorHandle* ancestor = subTensorHandle;
200  while (ancestor && ancestor->GetParent())
201  {
202  ancestor = ancestor->GetParent();
203  }
204  return ancestor;
205  };
206 
207  // Checks whether a TensorHandle has been pre-allocated
208  auto IsPreallocated = [&](ITensorHandle* const tensorHandle)
209  {
210  return tensorHandle && preallocatedTensors.find(tensorHandle) != preallocatedTensors.end();
211  };
212 
213  // Constant tensor handles need to last from the beginning of execution till the end,
214  // therefore we pre-allocate them upfront
215  for (auto&& layer : m_Layers)
216  {
217  if (layer->GetType() == LayerType::Constant)
218  {
219  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
220  {
221  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
222 
223  if (tensorHandle && !IsPreallocated(tensorHandle))
224  {
225  tensorHandle->Allocate();
226  preallocatedTensors.insert(tensorHandle);
227  }
228  }
229  }
230  }
231 
232  // Iterate over the network in topological order
233  for (auto&& layer : m_Layers)
234  {
235  // Count the amount of times each output slot references a certain buffer (ITensorHandle).
236  // The first time we encounter a new tensor handle, we start managing its lifetime.
237  for (auto&& slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
238  {
239  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(slot->GetOutputHandler().GetData());
240 
241  if (tensorHandle && !IsPreallocated(tensorHandle))
242  {
243  unsigned int numConnections = slot->GetNumConnections();
244  if (handleReferenceCounts.find(tensorHandle) == handleReferenceCounts.end())
245  {
246  handleReferenceCounts[tensorHandle] = numConnections;
247  tensorHandle->Manage();
248  if (handleReferenceCounts[tensorHandle] == 0u)
249  {
250  // if nobody consumes this tensor we call Allocate()
251  tensorHandle->Allocate();
252  }
253  }
254  else
255  {
256  handleReferenceCounts[tensorHandle] += numConnections;
257  }
258  }
259  }
260 
261  // Loop through the input slots in the same layer and decrement the reference counter associated
262  // to each tensor handle we encounter. Once it reaches zero, we end the lifetime of the tensor handle
263  for (auto&& slot = layer->BeginInputSlots(); slot != layer->EndInputSlots(); ++slot)
264  {
265  ITensorHandle *tensorHandle = TraceSubTensorHandleAncestry(
266  slot->GetConnectedOutputSlot()->GetOutputHandler().GetData());
267 
268  if (tensorHandle && !IsPreallocated(tensorHandle))
269  {
270  --handleReferenceCounts[tensorHandle];
271 
272  if (handleReferenceCounts[tensorHandle] == 0u)
273  {
274  // Stop managing lifetime of tensor handle
275  tensorHandle->Allocate();
276  handleReferenceCounts.erase(tensorHandle);
277  }
278  }
279  }
280  }
281 
282  return Status::Success;
283 }
284 
285 const Graph& Graph::TopologicalSort() const
286 {
287  if (!m_LayersInOrder)
288  {
289  // Resets layer order.
290  for (auto&& it : m_Layers)
291  {
292  it->ResetPriority();
293  }
294 
295  auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
296  {
297  return layerA->GetPriority() < layerB->GetPriority();
298  };
299 
300  m_Layers.sort(compareLayerPriority);
301 
302  m_LayersInOrder = true;
303  }
304 
305  return *this;
306 }
307 
308 void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendInternal>>& backends,
309  TensorHandleFactoryRegistry& registry)
310 {
311  // Returns true if the given layer could potentially need an intermediate copy/import layer (depending on its
312  // connections to other layers).
313  auto MayNeedCompatibilityLayer = [](const Layer& layer)
314  {
315  // All layers should have been associated with a valid compute device at this point.
316  ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
317  // Does not need another compatibility layer if a copy or import layer is already present.
318  return layer.GetType() != LayerType::MemCopy &&
319  layer.GetType() != LayerType::MemImport;
320  };
321 
322  auto IsCompatibilityStrategy = [](EdgeStrategy strategy)
323  {
324  return strategy == EdgeStrategy::CopyToTarget ||
325  strategy == EdgeStrategy::ExportToTarget;
326  };
327 
328  ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
329  {
330  ARMNN_ASSERT(srcLayer);
331 
332  if (!MayNeedCompatibilityLayer(*srcLayer))
333  {
334  // The current layer does not need copy layers, move to the next one
335  return;
336  }
337 
338  const std::vector<OutputSlot>& srcOutputSlots = srcLayer->GetOutputSlots();
339  for (unsigned int srcOutputIndex = 0; srcOutputIndex < srcOutputSlots.size(); srcOutputIndex++)
340  {
341  OutputSlot& srcOutputSlot = srcLayer->GetOutputSlot(srcOutputIndex);
342  const std::vector<InputSlot*> srcConnections = srcOutputSlot.GetConnections();
343  const std::vector<EdgeStrategy> srcEdgeStrategies = srcOutputSlot.GetEdgeStrategies();
344  for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
345  {
346  InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
347  ARMNN_ASSERT(dstInputSlot);
348 
349  EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
351  "Undefined memory strategy found while adding copy layers for compatibility");
352 
353  const Layer& dstLayer = dstInputSlot->GetOwningLayer();
354  if (MayNeedCompatibilityLayer(dstLayer) &&
355  IsCompatibilityStrategy(strategy))
356  {
357  // A copy layer is needed in between the source and destination layers.
358  // Record the operation rather than attempting to modify the graph as we go.
359  // (invalidating iterators)
360  const std::string compLayerName = fmt::format("[ {} ({}) -> {} ({}) ]",
361  srcLayer->GetName(),
362  srcOutputIndex,
363  dstLayer.GetName(),
364  dstInputSlot->GetSlotIndex());
365  Layer* compLayer = nullptr;
366  if (strategy == EdgeStrategy::CopyToTarget)
367  {
368  compLayer = InsertNewLayer<MemCopyLayer>(*dstInputSlot, compLayerName.c_str());
369  }
370  else
371  {
372  ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
373  compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
374  }
375 
376  compLayer->SetBackendId(dstLayer.GetBackendId());
377 
378  OutputSlot& compOutputSlot = compLayer->GetOutputSlot(0);
379  auto backendIt = backends.find(dstLayer.GetBackendId());
380  if (backendIt != backends.end() &&
381  backendIt->second &&
382  backendIt->second->SupportsTensorAllocatorAPI())
383  {
384  auto backend = backendIt->second.get();
385  auto tensorHandleFactoryIds = backend->GetHandleFactoryPreferences();
386  bool found = false;
387 
388  for (auto preference : tensorHandleFactoryIds)
389  {
390  auto factory = registry.GetFactory(preference);
391  if (factory)
392  {
393  auto srcPref = srcOutputSlot.GetTensorHandleFactoryId();
394  auto srcFactory = registry.GetFactory(srcPref);
395 
396  if (srcFactory)
397  {
398  bool canExportImport =
399  (factory->GetImportFlags() & srcFactory->GetExportFlags()) != 0;
400 
401  if (factory->SupportsMapUnmap() || canExportImport)
402  {
403  compOutputSlot.SetTensorHandleFactory(preference);
404  found = true;
405  break;
406  }
407  }
408  }
409  }
410 
411  if (!found)
412  {
414  }
415  }
416  else
417  {
419  }
420 
421  // The output strategy of a compatibility layer is always DirectCompatibility.
423 
424  // Recalculate the connection index on the previous layer as we have just inserted into it.
425  const std::vector<InputSlot*>& newSourceConnections = srcOutputSlot.GetConnections();
426  auto newSrcConnectionIndex = std::distance(newSourceConnections.begin(),
427  std::find(newSourceConnections.begin(),
428  newSourceConnections.end(),
429  &compLayer->GetInputSlot(0)));
430 
431  // The input strategy of a compatibility layer is always DirectCompatibilty.
432  srcOutputSlot.SetEdgeStrategy(armnn::numeric_cast<unsigned int>(newSrcConnectionIndex),
434  }
435  }
436  }
437  });
438 }
439 
441 {
442  ARMNN_ASSERT(substituteLayer != nullptr);
443 
444  // Create a new sub-graph with only the given layer, using
445  // the given sub-graph as a reference of which parent graph to use
446  SubgraphView substituteSubgraph(substituteLayer);
447 
448  SubstituteSubgraph(subgraph, substituteSubgraph);
449 }
450 
451 void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
452 {
453  // Look through each layer in the new subgraph and add any that are not already a member of this graph
454  substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
455  {
456  if (std::find(std::begin(m_Layers),
457  std::end(m_Layers),
458  iConnectableLayer) == std::end(m_Layers))
459  {
460  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
461  layer->Reparent(*this, m_Layers.end());
462  m_LayersInOrder = false;
463  }
464  });
465 
466  ReplaceSubgraphConnections(subgraph, substituteSubgraph);
467  EraseSubgraphLayers(subgraph);
468  TopologicalSort();
469 }
470 
/// Rewires all boundary connections of @p subgraph onto @p substituteSubgraph:
/// output slots feeding the old sub-graph are disconnected and reconnected to
/// the substitute's input slots (preserving any TensorInfo overrides), and the
/// old sub-graph's outgoing connections are moved onto the substitute's output
/// slots. Both sub-graphs must expose the same number of boundary slots.
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
    ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
                     "New sub-graph used for substitution must not be empty");

    // Sanity check: every substitute layer must already belong to this graph.
    const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
    std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
    {
        // IgnoreUnused silences the unused-variable warning in builds where
        // ARMNN_ASSERT_MSG compiles away.
        IgnoreUnused(layer);
        layer = PolymorphicDowncast<Layer*>(layer);
        ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                         "Substitute layer is not a member of graph");
    });

    const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();

    unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
    unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());

    const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
    const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();

    // The substitute must expose exactly as many boundary slots as the sub-graph it replaces.
    ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
    ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());

    // Disconnect the sub-graph and replace it with the substitute sub-graph

    // Step 1: process input slots
    for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
    {
        IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
        ARMNN_ASSERT(subgraphInputSlot);

        // Only disconnect if the InputSlot has a connection, this might not be the case when
        // dealing with working copies of SubgraphViews
        // Note: we don't need this check for OutputSlot as it iterates over a vector of valid connections
        if (subgraphInputSlot->GetConnection())
        {
            IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
            InputSlot* inputSlot = PolymorphicDowncast<InputSlot*>(subgraphInputSlot);
            // Remember whether the old slot carried an overridden TensorInfo so it
            // can be re-applied to the substitute slot after reconnection.
            bool isOverridden = inputSlot->IsTensorInfoOverridden();

            ARMNN_ASSERT(connectedOutputSlot);
            connectedOutputSlot->Disconnect(*subgraphInputSlot);

            IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
            ARMNN_ASSERT(substituteInputSlot);
            connectedOutputSlot->Connect(*substituteInputSlot);

            if (isOverridden)
            {
                TensorInfo overridden = inputSlot->GetTensorInfo();
                InputSlot* newInputSlot = PolymorphicDowncast<InputSlot*>(substituteInputSlot);
                newInputSlot->SetTensorInfo(overridden);
            }
        }
    }

    // Step 2: process output slots
    for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
    {
        auto subgraphOutputSlot =
            PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(subgraphOutputSlot);

        auto substituteOutputSlot =
            PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
        ARMNN_ASSERT(substituteOutputSlot);

        // Transfers every downstream connection of the old output slot to the substitute.
        subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
    }
}
544 
545 void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
546 {
547 
548  for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
549  {
550  auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
551  EraseLayer(layer);
552  }
553  subgraph.Clear();
554 }
555 
556 /// For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
557 /// LayerValidationException thrown if no TensorInfo is set.
558 ///
559 /// @throws LayerValidationException
561 {
562  for (auto&& layer : TopologicalSort())
563  {
564  if (layer->GetType() == armnn::LayerType::Constant)
565  {
566  for (auto&& output: layer->GetOutputSlots())
567  {
568  if (!output.IsTensorInfoSet())
569  {
570  std::ostringstream message;
571  message << "Output slot TensorInfo not set on "
572  << GetLayerTypeAsCString(layer->GetType())
573  << " layer \""
574  << layer->GetName()
575  << "\"";
576  throw LayerValidationException(message.str());
577  }
578  }
579  }
580  }
581 }
582 
584 {
585  for (auto&& layer : TopologicalSort())
586  {
587  for (auto&& input : layer->GetInputSlots())
588  {
589  const IOutputSlot* source = input.GetConnectedOutputSlot();
590  if (source == NULL)
591  {
592  // Throws exception due to a layer input not being connected to an output slot.
593  // Verifies input slot weights and bias are set for FullyConnected layers.
594  ConstructErrorMessageForUnconnectedInputs(layer, input.GetSlotIndex());
595  }
596 
597  if (!source->IsTensorInfoSet())
598  {
599  std::ostringstream message;
600  message << "Output slot TensorInfo not set on "
601  << GetLayerTypeAsCString(layer->GetType())
602  << " layer "
603  << std::quoted(layer->GetName());
604  throw LayerValidationException(message.str());
605  }
606  }
607 
608  if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
609  {
610  layer->ValidateTensorShapesFromInputs();
611  }
612  }
613 }
614 
615 /// Throws exception due to a layer input not being connected to an output slot.
616 /// Verifies weights and bias are set for layers on input slots 1
617 /// and 2 respectively. Method checks if bias is enabled before ensuring it is set.
618 ///
619 /// @param layer constant pointer to a Layer object
620 /// @param slotIndex input slot index of layer
621 /// @throws LayerValidationException
622 void Graph::ConstructErrorMessageForUnconnectedInputs(Layer* const layer,
623  unsigned int slotIndex)
624 {
625  std::ostringstream message;
626  bool noWeightsAndBias = false;
627 
628  if ((layer->GetType() == armnn::LayerType::FullyConnected ||
631  layer->GetType() == armnn::LayerType::DepthwiseConvolution2d) && slotIndex > 0)
632  {
633  message << std::endl;
634 
635  // If weights are not set and is bias enabled, also check if bias is set
636  if (slotIndex == 1 && layer->GetNumInputSlots() == 3)
637  {
638  const IOutputSlot* biasSource = layer->GetInputSlot(2).GetConnectedOutputSlot();
639  if (biasSource == NULL)
640  {
641  message << "Weights and bias layers not set." << std::endl;
642  noWeightsAndBias = true;
643  }
644  }
645 
646  // Only weights or bias are not set
647  if (!noWeightsAndBias)
648  {
649  if (slotIndex == 1)
650  {
651  message << "Weights layer not set." << std::endl;
652  }
653  else
654  {
655  message << "Bias layer not set." << std::endl;
656  }
657  }
658  }
659 
660  std::string slotString = noWeightsAndBias ? "1 & 2" : std::to_string(slotIndex);
661  message << "Input slot(s) "
662  << slotString
663  << " for "
664  << GetLayerTypeAsCString(layer->GetType())
665  << " not connected to an output slot. " << std::endl
666  << "Layer name: "
667  << std::quoted(layer->GetName());
668  throw LayerValidationException(message.str());
669 }
670 
/// Returns the profiler instance associated with this graph.
const std::shared_ptr<IProfiler>& Graph::GetProfiler() const
{
    return m_Profiler;
}
675 
677 {
678  m_LayersInOrder = false;
679 }
680 
681 } // namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::OutputSlot::GetEdgeStrategies
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
Definition: Layer.hpp:146
armnn::Graph::SetLayersOutOfOrder
void SetLayersOutOfOrder()
Definition: Graph.cpp:676
armnn::Compute::Undefined
@ Undefined
armnn::OutputSlot::SetTensorHandleFactory
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
Definition: Layer.cpp:200
armnn::InputSlot::SetTensorInfo
void SetTensorInfo(const TensorInfo tensorInfo) override
Sets the TensorInfo for this InputSlot.
Definition: Layer.cpp:587
armnn::SubgraphView::IOutputSlots
std::vector< IOutputSlot * > IOutputSlots
Definition: SubgraphView.hpp:60
armnn::ITensorHandle::Manage
virtual void Manage()=0
Indicate to the memory manager that this resource is active.
armnn::GetLayerTypeAsCString
const char * GetLayerTypeAsCString(LayerType type)
Definition: InternalTypes.cpp:13
armnn::Graph::AllocateDynamicBuffers
Status AllocateDynamicBuffers()
Allocates memory for all tensors under output tensor handers of each layer.
Definition: Graph.cpp:186
armnn::DotAttributeSet::AddAttribute
DotAttributeSet & AddAttribute(const std::string &name, const std::stringstream &value)
Definition: DotSerializer.cpp:95
armnn::InputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:53
armnn::Graph::ForEachLayer
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
armnn::OutputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:92
armnn::EdgeStrategy::DirectCompatibility
@ DirectCompatibility
No strategy has been defined. Used internally to verify integrity of optimizations.
armnn::Graph::EraseLayer
void EraseLayer(Iterator pos)
Deletes the layer at the specified position.
Definition: Graph.hpp:504
armnn::TensorHandleFactoryRegistry
Definition: TensorHandleFactoryRegistry.hpp:23
armnn::OutputSlot
Definition: Layer.hpp:100
armnn::Graph::SubstituteSubgraph
void SubstituteSubgraph(SubgraphView &subgraph, IConnectableLayer *substituteLayer)
Substitutes the given sub-graph with either a new layer or a new sub-graph.
Definition: Graph.cpp:440
TypesUtils.hpp
armnn::TensorHandleFactoryRegistry::GetFactory
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id Returns nullptr if not found.
Definition: TensorHandleFactoryRegistry.cpp:39
armnn::TensorInfo
Definition: Tensor.hpp:152
Graph.hpp
armnn::IOutputSlot::Disconnect
virtual void Disconnect(IInputSlot &slot)=0
BackendId.hpp
armnn::Graph::VerifyConstantLayerSetTensorInfo
void VerifyConstantLayerSetTensorInfo() const
For each ConstantLayer in Graph, ensures TensorInfo is set on all output slots.
Definition: Graph.cpp:560
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::Layer::GetOutputSlot
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:339
DotSerializer.hpp
ARMNN_ASSERT_MSG
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
armnn::Layer::GetInputSlot
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:337
NumericCast.hpp
armnn::DotEdge
Definition: DotSerializer.hpp:77
armnn::Layer::GetName
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:332
armnn::ITensorHandleFactory::LegacyFactoryId
static const FactoryId LegacyFactoryId
Definition: ITensorHandleFactory.hpp:50
armnn::Layer
Definition: Layer.hpp:230
ARMNN_LOG
#define ARMNN_LOG(severity)
Definition: Logging.hpp:212
armnn::EdgeStrategy::CopyToTarget
@ CopyToTarget
Source backends tensor data can be exported to destination backend tensor without copy.
armnn::InputSlot::GetTensorInfo
const TensorInfo & GetTensorInfo() const override
Gets the TensorInfo for this InputSlot.
Definition: Layer.cpp:592
Assert.hpp
armnn::InputSlot::GetSlotIndex
unsigned int GetSlotIndex() const override
Definition: Layer.hpp:54
armnn::SubgraphView::IConnectableLayers
std::list< IConnectableLayer * > IConnectableLayers
Definition: SubgraphView.hpp:62
armnn::IOutputSlot
An output connection slot for a layer.
Definition: INetwork.hpp:53
armnn::OutputSlot::GetOwningLayer
Layer & GetOwningLayer() const
Definition: Layer.hpp:132
armnn::DotNode::GetContents
NodeContent & GetContents()
Definition: DotSerializer.hpp:107
armnn::SubgraphView::GetIConnectableLayers
const IConnectableLayers & GetIConnectableLayers() const
Definition: SubgraphView.cpp:278
armnn::EdgeStrategy::Undefined
@ Undefined
armnn::NodeContent::AddContent
NodeContent & AddContent(const std::string &content)
Definition: DotSerializer.cpp:147
Logging.hpp
armnn::DotGraph
Definition: DotSerializer.hpp:125
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:220
IBackendInternal.hpp
LayersFwd.hpp
armnn::ITensorHandle::GetParent
virtual ITensorHandle * GetParent() const =0
Get the parent tensor if this is a subtensor.
armnn::LayerValidationException
Definition: Exceptions.hpp:105
armnn::DotNode
Definition: DotSerializer.hpp:101
armnn::SubgraphView::IInputSlots
std::vector< IInputSlot * > IInputSlots
Definition: SubgraphView.hpp:58
armnn::EdgeStrategy
EdgeStrategy
Definition: ITensorHandleFactory.hpp:104
armnn::Layer::GetGuid
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:343
armnn::ParameterStringifyFunction
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
Definition: SerializeLayerParameters.hpp:14
armnn::Graph::GetProfiler
const std::shared_ptr< IProfiler > & GetProfiler() const
Definition: Graph.cpp:671
armnn::SubgraphView
The SubgraphView class represents a subgraph of a Graph.
Definition: SubgraphView.hpp:31
armnn::Status::Success
@ Success
armnn::Layer::GetOutputSlots
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:259
armnn::InputSlot::IsTensorInfoOverridden
bool IsTensorInfoOverridden() const override
Returns true if this InputSlot has an overridden TensorInfo that was set through a call to SetTensorI...
Definition: Layer.cpp:609
armnn::Graph::InferTensorInfos
void InferTensorInfos()
Definition: Graph.cpp:583
armnn::BoostLogSeverityMapping::info
@ info
armnn::LayerType::MemImport
@ MemImport
LayerGuid
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
Definition: Types.hpp:26
armnn::Layer::GetNumInputSlots
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:334
armnn::InputSlot
Definition: Layer.hpp:42
armnn::ShapeInferenceMethod::ValidateOnly
@ ValidateOnly
Validate all output shapes.
armnn::LayerType::FullyConnected
@ FullyConnected
SubgraphView.hpp
armnn::LayerType::DepthwiseConvolution2d
@ DepthwiseConvolution2d
armnn::Layer::GetType
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:286
armnn::Graph::AddCompatibilityLayers
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices,...
Definition: Graph.cpp:308
armnn::DotEdge::GetAttributeSet
DotAttributeSet & GetAttributeSet()
Definition: DotSerializer.hpp:83
armnn::Status
Status
Definition: Types.hpp:42
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::Layer::Clone
virtual Layer * Clone(Graph &graph) const =0
Creates a dynamically-allocated copy of this layer.
armnn::Layer::BeginOutputSlots
std::vector< OutputSlot >::iterator BeginOutputSlots()
Definition: Layer.hpp:266
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::Graph::Graph
Graph(bool shapeInferenceMethod=false, bool allowExpandedDims=false)
Definition: Graph.hpp:98
armnn::Graph::TopologicalSort
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
armnn::SubgraphView::GetIInputSlots
const IInputSlots & GetIInputSlots() const
Definition: SubgraphView.cpp:233
armnn::EdgeStrategy::ExportToTarget
@ ExportToTarget
Destination backend can work directly with tensors on source backend.
armnn::OutputSlot::GetConnections
const std::vector< InputSlot * > & GetConnections() const
Definition: Layer.hpp:145
armnn::OutputSlot::SetEdgeStrategy
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:210
armnn::Graph::SerializeToDot
Status SerializeToDot(std::ostream &stream)
Definition: Graph.cpp:125
armnn::Layer::GetBackendId
const BackendId & GetBackendId() const
Definition: Layer.hpp:290
armnn::DotDefaults
Definition: DotSerializer.hpp:114
armnn::BackendId
Definition: BackendId.hpp:75
armnn::OutputSlot::GetTensorHandleFactoryId
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:205
armnn::InputSlot::GetConnectedOutputSlot
const OutputSlot * GetConnectedOutputSlot() const
Definition: Layer.hpp:56
armnn::LayerType::MemCopy
@ MemCopy
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::SubgraphView::ForEachIConnectableLayer
void ForEachIConnectableLayer(Func func) const
Definition: SubgraphView.hpp:46
armnn::Layer::SetBackendId
void SetBackendId(const BackendId &id) override
Set the backend of the IConnectableLayer.
Definition: Layer.hpp:291
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::IInputSlot
An input connection slot for a layer.
Definition: INetwork.hpp:25
armnn::DotDefaults::GetAttributeSet
DotAttributeSet & GetAttributeSet()
Definition: DotSerializer.hpp:120
armnn::LayerType::Convolution2d
@ Convolution2d
armnn::LayerType::Convolution3d
@ Convolution3d
armnn::Graph::Print
Status Print() const
Definition: Graph.cpp:68
armnn::Graph
Definition: Graph.hpp:30
armnn::SubgraphView::GetIOutputSlots
const IOutputSlots & GetIOutputSlots() const
Definition: SubgraphView.cpp:238
armnn::Status::Failure
@ Failure
armnn::ITensorHandle::Allocate
virtual void Allocate()=0
Indicate to the memory manager that this resource is no longer active.
armnn::LayerType::Constant
@ Constant
armnn::IInputSlot::GetConnection
virtual const IOutputSlot * GetConnection() const =0