ArmNN
 21.11
GraphTests.cpp File Reference

Go to the source code of this file.

Functions

 TEST_SUITE ("Graph")
 

Function Documentation

◆ TEST_SUITE()

TEST_SUITE ( "Graph"  )

Definition at line 22 of file GraphTests.cpp.

References Graph::AddCompatibilityLayers(), Graph::AddLayer(), Graph::begin(), CheckOrder(), OutputSlot::Connect(), armnn::CopyToTarget, armnn::CpuAcc, armnn::CpuRef, armnn::DirectCompatibility, armnn::Float32, Layer::GetBackendId(), IOutputSlot::GetConnection(), GetFirstLayerWithName(), Layer::GetInputSlot(), Layer::GetName(), Layer::GetNameStr(), IOutputSlot::GetNumConnections(), Layer::GetOutputSlot(), armnn::GpuAcc, GraphHasNamedLayer(), armnn::Input, Graph::InsertNewLayer(), ConstantLayer::m_LayerOutput, armnn::numeric_cast(), armnn::Output, Layer::SetBackendId(), OutputSlot::SetEdgeStrategy(), OutputSlot::SetTensorInfo(), TEST_CASE_FIXTURE(), and Graph::TopologicalSort().

23 {
// Verifies the most basic Graph contract: AddLayer<InputLayer> does not throw
// and the added layer is afterwards discoverable by name via GraphHasNamedLayer.
24 TEST_CASE("ClassGraph")
25 {
26  armnn::Graph graph;
27  CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
28  CHECK(GraphHasNamedLayer(graph, "layerA"));
29 }
30 
// Builds a small diamond-shaped graph (A branches to D and E, E feeds B, and
// D/B rejoin at the two-input addition layer C, which feeds the output) and
// checks that Graph's iteration order respects every producer-before-consumer
// constraint via CheckOrder.
31 TEST_CASE("TopologicalSort")
32 {
33  armnn::Graph graph;
34 
35  armnn::ActivationDescriptor activationDefaults;
36 
37  CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
38  CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
39  CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
40  CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
41  CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
42  CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
43 
44  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
45  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
46  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
47  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
48  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
49  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
50 
51  // Simple graph which branches and rejoins.
52  // A
53  // / \'
54  // D E
55  // \ |
56  // \ B
57  // \|
58  // C
59  layerA->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
60  layerA->GetOutputSlot(0).Connect(layerE->GetInputSlot(0));
61  layerE->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
62  layerD->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
63  layerB->GetOutputSlot(0).Connect(layerC->GetInputSlot(1));
64  layerC->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
65 
66  // check order is valid
// NOTE(review): the C -> output ordering is not asserted here; presumably
// covered implicitly by the other constraints — confirm if intentional.
67  CHECK(CheckOrder(graph, layerA, layerD));
68  CHECK(CheckOrder(graph, layerA, layerE));
69  CHECK(CheckOrder(graph, layerD, layerC));
70  CHECK(CheckOrder(graph, layerE, layerB));
71  CHECK(CheckOrder(graph, layerB, layerC));
72 }
73 
74 TEST_CASE("InsertNewLayerBefore")
75 {
76  armnn::Graph graph;
77  armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
78 
79  std::vector<armnn::Layer*> order;
80 
81  armnn::ActivationDescriptor activationDefaults;
82  CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
83  CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
84  CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
85  CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
86  CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
87 
88  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
89  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
90  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
91  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
92  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
93 
94  // A
95  // / \'
96  // B C
97  // \ /
98  // D
99  layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo);
100  layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo);
101  layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo);
102  layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo);
103 
104  layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
105  layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
106  layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
107  layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1));
108  layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
109 
110  // Checks order is valid.
111  CHECK(CheckOrder(graph, layerA, layerB));
112  CHECK(CheckOrder(graph, layerA, layerC));
113  CHECK(CheckOrder(graph, layerB, layerD));
114  CHECK(CheckOrder(graph, layerC, layerD));
115 
116  // A
117  // / \'
118  // B C
119  // \ |
120  // \ E
121  // \|
122  // D
123  CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
124  activationDefaults,
125  "layerE"));
126 
127  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
128 
129  // Checks order is valid.
130  CHECK(CheckOrder(graph, layerA, layerB));
131  CHECK(CheckOrder(graph, layerA, layerC));
132  CHECK(CheckOrder(graph, layerB, layerD));
133  CHECK(CheckOrder(graph, layerC, layerE));
134  CHECK(CheckOrder(graph, layerE, layerD));
135 
136  // A
137  // /|
138  // / F
139  // / |
140  // B C
141  // \ |
142  // \ E
143  // \|
144  // D
145  CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
146  activationDefaults,
147  "layerF"));
148 
149  armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
150 
151  // Checks order is valid.
152  CHECK(CheckOrder(graph, layerA, layerB));
153  CHECK(CheckOrder(graph, layerA, layerF));
154  CHECK(CheckOrder(graph, layerF, layerC));
155  CHECK(CheckOrder(graph, layerB, layerD));
156  CHECK(CheckOrder(graph, layerC, layerE));
157  CHECK(CheckOrder(graph, layerE, layerD));
158 }
159 
160 TEST_CASE("InsertNewLayerAfter")
161 {
162  armnn::Graph graph;
163  armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
164 
165  std::vector<armnn::Layer*> order;
166 
167  armnn::ActivationDescriptor activationDefaults;
168  CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
169  CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
170  CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
171  CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
172  CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
173 
174  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
175  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
176  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
177  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
178  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
179 
180  // A
181  // / \'
182  // B C
183  // \ /
184  // D
185  layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo);
186  layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo);
187  layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo);
188  layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo);
189 
190  layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
191  layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
192  layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
193  layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1));
194  layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
195 
196  // Checks order is valid.
197  CHECK(CheckOrder(graph, layerA, layerB));
198  CHECK(CheckOrder(graph, layerA, layerC));
199  CHECK(CheckOrder(graph, layerB, layerD));
200  CHECK(CheckOrder(graph, layerC, layerD));
201 
202  // A
203  // / \'
204  // B C
205  // \ |
206  // \ E
207  // \|
208  // D
209  CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetOutputSlot(),
210  activationDefaults,
211  "layerE"));
212 
213  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
214 
215  // Checks order is valid.
216  CHECK(CheckOrder(graph, layerA, layerB));
217  CHECK(CheckOrder(graph, layerA, layerC));
218  CHECK(CheckOrder(graph, layerB, layerD));
219  CHECK(CheckOrder(graph, layerC, layerE));
220  CHECK(CheckOrder(graph, layerE, layerD));
221 
222 
223  // A
224  // |
225  // F
226  // / \'
227  // B C
228  // \ |
229  // \ E
230  // \ /
231  // D
232  CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerA->GetOutputSlot(),
233  activationDefaults,
234  "layerF"));
235 
236  armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
237 
238  // Checks order is valid.
239  CHECK(CheckOrder(graph, layerA, layerF));
240  CHECK(CheckOrder(graph, layerF, layerB));
241  CHECK(CheckOrder(graph, layerF, layerC));
242  CHECK(CheckOrder(graph, layerB, layerD));
243  CHECK(CheckOrder(graph, layerC, layerE));
244  CHECK(CheckOrder(graph, layerE, layerD));
245 }
246 
// A directed graph edge represented as (source layer, destination layer),
// used below to snapshot and compare graph connectivity. File-local on purpose.
247 namespace
248 {
249  using Edge = std::pair<const armnn::Layer*, const armnn::Layer*>;
250 }
251 
// Flattens a graph's connectivity into a list of (source, destination) layer
// pairs by walking every output slot of every layer and following each of its
// connections to the owning layer of the connected input slot.
252 static std::vector<Edge> GetEdgeList(const armnn::Graph& graph)
253 {
254  std::vector<Edge> edges;
255 
256  for (auto&& srcLayer: graph)
257  {
258  const unsigned int numOutputSlots = srcLayer->GetNumOutputSlots();
259  for (unsigned int s = 0; s < numOutputSlots; ++s)
260  {
261  const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(s);
262  const unsigned int numConnections = outputSlot.GetNumConnections();
263  for (unsigned int c = 0; c < numConnections; ++c)
264  {
// Downcast is safe within the graph's own slot implementation types.
265  auto inputSlot = armnn::PolymorphicDowncast<const armnn::InputSlot*>(outputSlot.GetConnection(c));
266  edges.emplace_back(srcLayer, &inputSlot->GetOwningLayer());
267  }
268  }
269  }
270 
271  return edges;
272 }
273 
// Validates a graph produced by AddCompatibilityLayers() against the original:
//  * no duplicate edges were introduced;
//  * every new edge either (a) existed in the original graph and connects two
//    layers on the SAME backend, or (b) connects a layer to a newly inserted
//    copy layer, whose adjacent edges must bridge DIFFERENT backends and must
//    correspond to an edge of the original graph;
//  * every original edge is accounted for (matched edges are erased from
//    origEdges; the final check requires the list to be empty).
// Layers are matched between the two graphs by name (GetNameStr), since the
// original graph is a copy and pointer identity does not hold across graphs.
274 static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armnn::Graph& origGraph)
275 {
276  std::vector<Edge> origEdges = GetEdgeList(origGraph);
277  std::vector<Edge> newEdges = GetEdgeList(graph);
278 
279  // Adding copy layers should not produce any duplicate edges.
280  {
281  std::vector<Edge> sortedNewEdges = newEdges;
282  std::sort(sortedNewEdges.begin(), sortedNewEdges.end());
283 
284  auto last = std::unique(sortedNewEdges.begin(), sortedNewEdges.end());
285  CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
286  }
287 
288  // Each new edge must be tested.
289  while (!newEdges.empty())
290  {
291  const Edge edge = std::move(newEdges.back());
292  newEdges.pop_back();
293 
294  // Edge present in the original graph?
295  int originalEdge = -1;
296  for (unsigned int i = 0; i < origEdges.size(); i++)
297  {
298  const Edge& origEdge = origEdges[i];
299  if (origEdge.first->GetNameStr() == edge.first->GetNameStr() &&
300  origEdge.second->GetNameStr() == edge.second->GetNameStr())
301  {
302  originalEdge = armnn::numeric_cast<int>(i);
303  }
304  }
305 
306  if (originalEdge != -1)
307  {
308  // Each vertex should correspond to a layer.
309  const armnn::Layer* srcLayer = edge.first;
310  const armnn::Layer* dstLayer = edge.second;
311  CHECK(srcLayer);
312  CHECK(dstLayer);
313 
314  // Both layers must have the same compute device.
315  if (srcLayer && dstLayer)
316  {
317  CHECK((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
318  }
319 
320  // Marks edge in original graph as observed (by deleting it).
321  origEdges.erase(origEdges.begin() + originalEdge);
322  }
323  else
324  {
325  // Edge did not exist in the original graph.
326  // It must then be an edge connecting a layer and a copy layer.
327  const armnn::Layer* srcLayer = edge.first;
328  const armnn::Layer* dstLayer = edge.second;
329 
330  if (srcLayer == nullptr || dstLayer == nullptr)
331  {
332  FAIL("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second
333  << ") introduced after adding copy layers to a graph "
334  "correspond to a layer not known to the graph");
// NOTE(review): doctest's FAIL aborts the test case, so this `continue`
// (and the ones below) is unreachable — presumably kept for readability.
335  continue;
336  }
337 
338  // One and only one of the two layers referenced by the edge should be present in the original graph.
339  const bool srcLayerInOrigGraph = GraphHasNamedLayer(origGraph, srcLayer->GetNameStr());
340  const bool dstLayerInOrigGraph = GraphHasNamedLayer(origGraph, dstLayer->GetNameStr());
341 
342  if (srcLayerInOrigGraph == dstLayerInOrigGraph)
343  {
344  FAIL("A new edge ("
345  << edge.first->GetName()
346  << ", "
347  << edge.second->GetName()
348  << ") introduced after adding copy "
349  "layers to a graph is invalid. One of the ends should be present in the original "
350  "graph and the other should not, but "
351  << (srcLayerInOrigGraph ? "both are" : "none are"));
352  continue;
353  }
354 
355  const armnn::Layer* copyLayer = srcLayerInOrigGraph ? dstLayer : srcLayer;
356  const armnn::Layer* nonCopyLayer = srcLayerInOrigGraph ? srcLayer : dstLayer;
357 
358  // Finds all edges connecting the copy layer to other layers.
359  std::vector<Edge> adjEdges;
360  auto it = newEdges.begin();
361  while (it != newEdges.end())
362  {
363  Edge& newEdge = *it;
364  if (copyLayer == (srcLayerInOrigGraph ? newEdge.first : newEdge.second))
365  {
366  adjEdges.push_back(newEdge);
367 
368  // Since the adjacent edge is immediately tested below, there is no need to consider it afterwards.
369  it = newEdges.erase(it);
370  }
371  else
372  {
373  it++;
374  }
375  }
376 
377  if (adjEdges.empty())
378  {
379  FAIL("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
380  edge.second << "), but no other edges connecting the copy layer '" << copyLayer->GetName()
381  << "' to other layers could be found");
382  continue;
383  }
384 
385  // Tests adjacent edges now.
386  for (const Edge& adjEdge : adjEdges)
387  {
388  // The adjacent edge must connect the copy layer to another layer.
389  const armnn::Layer* adjLayer = srcLayerInOrigGraph ? adjEdge.second : adjEdge.first;
390 
391  if (!adjLayer)
392  {
393  FAIL("An edge (" << adjEdge.first << ", " << adjEdge.second <<") is adjacent to an "
394  "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
395  "), but the non-copy layer in the former does not correspond to a layer");
396  continue;
397  }
398 
399  // Both layers must have different compute devices.
400  CHECK((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));
401 
402  // There must exist an edge connecting both layers directly in the original graph.
403  {
404  const armnn::Layer* origEdgeSrc = srcLayerInOrigGraph ? nonCopyLayer : adjLayer;
405  const armnn::Layer* origEdgeDst = srcLayerInOrigGraph ? adjLayer : nonCopyLayer;
406 
407  auto origEdgeIter = origEdges.begin();
408  for (; origEdgeIter != origEdges.end(); origEdgeIter++)
409  {
410  if (origEdgeIter->first->GetNameStr() == origEdgeSrc->GetNameStr() &&
411  origEdgeIter->second->GetNameStr() == origEdgeDst->GetNameStr())
412  {
413  break;
414  }
415  }
416 
417  if (origEdgeIter != origEdges.end())
418  {
419  origEdges.erase(origEdgeIter);
420  }
421  else
422  {
423  FAIL("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to "
424  "an edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
425  "), but there is no edge connecting the layers in the original graph");
426  }
427  }
428  }
429  }
430  }
431 
432  CHECK_MESSAGE(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
433 }
434 
// doctest fixture providing a graph that mixes CpuRef and CpuAcc backends so
// AddCompatibilityLayers() must insert copy layers on the cross-backend edges.
// Topology: input -> conv1 -> {conv2, concat}; conv2 -> concat; concat -> act
// -> softmax -> output. Edge strategies are pre-set to DirectCompatibility for
// same-backend edges and CopyToTarget for cross-backend ones.
435 struct CopyLayersFixture
436 {
437  CopyLayersFixture()
438  {
439  }
440 
441  void InitialiseTestGraph()
442  {
443  using namespace armnn;
444  using namespace std;
445 
446  Layer* const inputLayer = AddLayer<InputLayer>(0, "input");
447  inputLayer->SetBackendId(Compute::CpuRef);
448 
449  Convolution2dDescriptor convolutionDefaults;
450  Layer* const convLayer1 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv1");
451  convLayer1->SetBackendId(Compute::CpuRef);
452 
453  inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
454 
455  Layer* const convLayer2 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv2");
456  convLayer2->SetBackendId(Compute::CpuAcc);
457 
458  convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
459 
460  armnn::OriginsDescriptor concatDefaults(2);
461  Layer* const concatLayer = AddLayer<ConcatLayer>(concatDefaults, "concat");
462  concatLayer->SetBackendId(armnn::Compute::CpuRef);
463 
464  convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
465  convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
466 
467  armnn::ActivationDescriptor activationDefaults;
468  Layer* const actLayer = AddLayer<ActivationLayer>(activationDefaults, "act");
// NOTE(review): no SetBackendId call for actLayer is visible here (the
// generated listing skips a line at this point) — confirm against the real
// GraphTests.cpp whether the activation layer's backend is set to CpuRef.
470 
471  concatLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0));
472 
473  armnn::SoftmaxDescriptor softmaxDefaults;
474  Layer* const softmaxLayer = AddLayer<SoftmaxLayer>(softmaxDefaults, "softmax");
475  softmaxLayer->SetBackendId(armnn::Compute::CpuRef);
476 
477  actLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
478 
479  Layer* const outputLayer = AddLayer<OutputLayer>(0, "output");
480  outputLayer->SetBackendId(armnn::Compute::CpuAcc);
481 
482  softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
483 
484  // Set the memory strategies - for this test should be DirectCompatibility for same backends,
485  // and CopyToTarget for different backends
486  inputLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
487  convLayer1->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
488  convLayer1->GetOutputSlot(0).SetEdgeStrategy(1, EdgeStrategy::DirectCompatibility);
489  convLayer2->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
490  concatLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
491  actLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
492  softmaxLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
493  }
494 
495  armnn::TensorInfo m_TensorDesc;
496  armnn::Graph m_Graph;
497  std::map<armnn::BackendId, std::unique_ptr<armnn::IBackendInternal>> m_Backends;
498  armnn::TensorHandleFactoryRegistry m_FactoryRegistry;
499 
500 private:
501 
// Adds a layer to m_Graph and stamps the default tensor info on every output
// slot, so tests don't have to set it per layer.
502  template <typename LayerType, typename... Args>
503  LayerType* AddLayer(Args&&... args)
504  {
505  LayerType* const layer = m_Graph.AddLayer<LayerType>(std::forward<Args>(args)...);
506 
507  for (auto slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
508  {
509  slot->SetTensorInfo(m_TensorDesc);
510  }
511 
512  return layer;
513  };
514 };
515 
// Runs AddCompatibilityLayers() once on the mixed-backend fixture graph and
// validates the result against a pre-modification copy of the graph.
516 TEST_CASE_FIXTURE(CopyLayersFixture, "AddCopyLayers")
517 {
518  InitialiseTestGraph();
519  const armnn::Graph origGraph(m_Graph);
520  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
521 
522  TestGraphAfterAddingCopyLayers(m_Graph, origGraph);
523 }
524 
// AddCompatibilityLayers() must be idempotent: repeated calls after the first
// must leave the edge list unchanged.
525 TEST_CASE_FIXTURE(CopyLayersFixture, "AddCopyLayersSeveralTimes")
526 {
527  InitialiseTestGraph();
528  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
529 
530  // Calling AddCompatibilityLayers() several times should not change the connections.
531  const std::vector<Edge> edges = GetEdgeList(m_Graph);
532  for (int i = 0; i < 4; ++i)
533  {
534  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
535  const std::vector<Edge> otherEdges = GetEdgeList(m_Graph);
536  CHECK((edges == otherEdges));
537  }
538 }
539 
// When two parallel edges (splitter slot 0 and slot 1 both feeding the
// addition layer) each need a copy layer, the inserted copy layers must get
// distinct names — otherwise the edge list would contain duplicates.
// Uses the fixture only for m_Backends / m_FactoryRegistry; builds its own graph.
540 TEST_CASE_FIXTURE(CopyLayersFixture, "CopyLayersAddedBetweenSameLayersHaveDifferentNames")
541 {
542  armnn::Graph graph;
543 
544  armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
// NOTE(review): the generated listing omits a line here (presumably the
// input layer's SetBackendId call) — confirm against the real source.
546 
547  armnn::ViewsDescriptor splitterDesc(2);
548  armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
549  splitterLayer->SetBackendId(armnn::Compute::GpuAcc);
550 
551  armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
552  additionLayer->SetBackendId(armnn::Compute::CpuRef);
553 
554  armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
555  outputLayer->SetBackendId(armnn::Compute::CpuRef);
556 
557  inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
558  splitterLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
559  splitterLayer->GetOutputSlot(1).Connect(additionLayer->GetInputSlot(1));
560  additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
561 
// NOTE(review): the generated listing omits lines 562-565 here (likely the
// edge-strategy setup for the cross-backend edges) — see GraphTests.cpp.
566 
567  graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
568 
// 4 original edges + 2 copy-layer insertions on the splitter->addition edges
// yields 6 distinct edges; duplicates would indicate name collisions.
569  std::vector<Edge> edges = GetEdgeList(graph);
570  CHECK(edges.size() == 6u);
571  std::sort(edges.begin(), edges.end());
572  auto last = std::unique(edges.begin(), edges.end());
573  CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCompatibilityLayers()");
574 }
575 
// Layer names need not be unique: two layers both named "layer" must coexist,
// and TopologicalSort() must still order input before output.
576 TEST_CASE("DuplicateLayerNames")
577 {
578  armnn::Graph graph;
579 
580  armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "layer");
// NOTE(review): the generated listing omits a line here (presumably the
// input layer's SetBackendId call) — confirm against the real source.
582 
583  armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "layer");
584  outputLayer->SetBackendId(armnn::Compute::CpuRef);
585 
586  inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
587 
588  auto it = graph.TopologicalSort().begin();
589  CHECK(((*it)->GetType() == armnn::LayerType::Input));
590  CHECK(((*std::next(it))->GetType() == armnn::LayerType::Output));
591 }
592 
// Verifies that copying a Graph shares constant tensor storage: m_LayerOutput
// is a shared_ptr<ConstTensorHandle>, so after graph1 is destroyed the copied
// graph0 must keep the weight data alive and sharedWeightPtr must still be
// valid and read 1.0f.
// Fix: removed the stray duplicate semicolon after the make_shared assignment.
593 TEST_CASE("CheckGraphConstTensorSharing")
594 {
595  armnn::Graph graph0;
596  const float* sharedWeightPtr;
597 
598  {
599  armnn::Graph graph1;
600 
601  armnn::ConstantLayer* const constantLayer = graph1.AddLayer<armnn::ConstantLayer>("ConstantLayer");
602 
603  float weight = 1.0f;
604  armnn::ConstTensor constTensor({{ 1, 1 }, armnn::DataType::Float32, 0.0f, 0, true}, &weight);
605  constantLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(constTensor);
606 
607  // point sharedWeightPtr to graph1's const tensor
608  sharedWeightPtr = constantLayer->m_LayerOutput->GetConstTensor<float>();
609 
610  graph0 = armnn::Graph(graph1);
611  // graph1 goes out of scope
612  }
613 
614  CHECK(*sharedWeightPtr == 1);
615 }
616 
617 }
A layer that the constant data can be bound to.
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:165
This layer represents a split operation.
A ViewsDescriptor for the SplitterLayer.
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:181
No strategy has been defined. Used internally to verify integrity of optimizations.
CPU Execution: Reference C++ kernels.
armnn::Layer * GetFirstLayerWithName(armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:22
bool CheckOrder(const armnn::Graph &graph, const armnn::Layer *first, const armnn::Layer *second)
Checks that first comes before second in the order.
Definition: GraphUtils.cpp:68
std::shared_ptr< ConstTensorHandle > m_LayerOutput
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:417
A Convolution2dDescriptor for the Convolution2dLayer.
Source backend's tensor data can be exported to the destination backend's tensor without copy...
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
bool GraphHasNamedLayer(const armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:10
This layer represents an activation operation with the specified activation function.
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
Definition: Layer.hpp:270
virtual const IInputSlot * GetConnection(unsigned int index) const =0
TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenNeonAndGpu")
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
An output connection slot for a layer.
Definition: INetwork.hpp:37
An OriginsDescriptor for the ConcatLayer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
const std::string & GetNameStr() const
Definition: Layer.hpp:220
GPU Execution: OpenCL: ArmCompute.
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
const BackendId & GetBackendId() const
Definition: Layer.hpp:269
This layer represents an addition operation.
CPU Execution: NEON: ArmCompute.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
virtual unsigned int GetNumConnections() const =0
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:311
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:180
LayerT * InsertNewLayer(InputSlot &insertBefore, Args &&... args)
Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itse...
Definition: Graph.hpp:431
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:302
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
Definition: Types.hpp:443