ArmNN
 20.05
GraphTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "GraphUtils.hpp"
6 
7 #include <Graph.hpp>
8 #include <Layer.hpp>
9 
10 #include <armnn/TypesUtils.hpp>
11 #include <armnn/Exceptions.hpp>
13 
15 
18 
19 #include <boost/cast.hpp>
20 #include <boost/test/unit_test.hpp>
21 
22 /// Checks that first comes before second in the order.
23 bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second)
24 {
25  graph.Print();
26 
27  const auto& order = graph.TopologicalSort();
28 
29  auto firstPos = std::find(order.begin(), order.end(), first);
30  auto secondPos = std::find(firstPos, order.end(), second);
31 
32  return (secondPos != order.end());
33 }
34 
36 
{
    armnn::Graph graph;
    // Adding a layer must not throw, and the layer must then be findable by its name.
    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
    BOOST_TEST(GraphHasNamedLayer(graph, "layerA"));
}
43 
44 BOOST_AUTO_TEST_CASE(TopologicalSort)
45 {
46  armnn::Graph graph;
47 
48  armnn::ActivationDescriptor activationDefaults;
49 
50  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
51  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
52  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
53  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
54  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
55  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
56 
57  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
58  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
59  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
60  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
61  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
62  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
63 
64  // Simple graph which branches and rejoins.
65  // A
66  // / \'
67  // D E
68  // \ |
69  // \ B
70  // \|
71  // C
72  layerA->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
73  layerA->GetOutputSlot(0).Connect(layerE->GetInputSlot(0));
74  layerE->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
75  layerD->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
76  layerB->GetOutputSlot(0).Connect(layerC->GetInputSlot(1));
77  layerC->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
78 
79  // check order is valid
80  BOOST_TEST(CheckOrder(graph, layerA, layerD));
81  BOOST_TEST(CheckOrder(graph, layerA, layerE));
82  BOOST_TEST(CheckOrder(graph, layerD, layerC));
83  BOOST_TEST(CheckOrder(graph, layerE, layerB));
84  BOOST_TEST(CheckOrder(graph, layerB, layerC));
85 }
86 
87 BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
88 {
89  armnn::Graph graph;
90  armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
91 
92  std::vector<armnn::Layer*> order;
93 
94  armnn::ActivationDescriptor activationDefaults;
95  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
96  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
97  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
98  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
99  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
100 
101  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
102  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
103  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
104  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
105  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
106 
107  // A
108  // / \'
109  // B C
110  // \ /
111  // D
112  layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo);
113  layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo);
114  layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo);
115  layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo);
116 
117  layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
118  layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
119  layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
120  layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1));
121  layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
122 
123  // Checks order is valid.
124  BOOST_TEST(CheckOrder(graph, layerA, layerB));
125  BOOST_TEST(CheckOrder(graph, layerA, layerC));
126  BOOST_TEST(CheckOrder(graph, layerB, layerD));
127  BOOST_TEST(CheckOrder(graph, layerC, layerD));
128 
129  // A
130  // / \'
131  // B C
132  // \ |
133  // \ E
134  // \|
135  // D
136  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
137  activationDefaults,
138  "layerE"));
139 
140  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
141 
142  // Checks order is valid.
143  BOOST_TEST(CheckOrder(graph, layerA, layerB));
144  BOOST_TEST(CheckOrder(graph, layerA, layerC));
145  BOOST_TEST(CheckOrder(graph, layerB, layerD));
146  BOOST_TEST(CheckOrder(graph, layerC, layerE));
147  BOOST_TEST(CheckOrder(graph, layerE, layerD));
148 
149  // A
150  // /|
151  // / F
152  // / |
153  // B C
154  // \ |
155  // \ E
156  // \|
157  // D
158  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
159  activationDefaults,
160  "layerF"));
161 
162  armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
163 
164  // Checks order is valid.
165  BOOST_TEST(CheckOrder(graph, layerA, layerB));
166  BOOST_TEST(CheckOrder(graph, layerA, layerF));
167  BOOST_TEST(CheckOrder(graph, layerF, layerC));
168  BOOST_TEST(CheckOrder(graph, layerB, layerD));
169  BOOST_TEST(CheckOrder(graph, layerC, layerE));
170  BOOST_TEST(CheckOrder(graph, layerE, layerD));
171 }
172 
173 BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
174 {
175  armnn::Graph graph;
176  armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
177 
178  std::vector<armnn::Layer*> order;
179 
180  armnn::ActivationDescriptor activationDefaults;
181  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
182  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
183  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
184  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
185  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
186 
187  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
188  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
189  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
190  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
191  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
192 
193  // A
194  // / \'
195  // B C
196  // \ /
197  // D
198  layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo);
199  layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo);
200  layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo);
201  layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo);
202 
203  layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
204  layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
205  layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
206  layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1));
207  layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
208 
209  // Checks order is valid.
210  BOOST_TEST(CheckOrder(graph, layerA, layerB));
211  BOOST_TEST(CheckOrder(graph, layerA, layerC));
212  BOOST_TEST(CheckOrder(graph, layerB, layerD));
213  BOOST_TEST(CheckOrder(graph, layerC, layerD));
214 
215  // A
216  // / \'
217  // B C
218  // \ |
219  // \ E
220  // \|
221  // D
222  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetOutputSlot(),
223  activationDefaults,
224  "layerE"));
225 
226  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
227 
228  // Checks order is valid.
229  BOOST_TEST(CheckOrder(graph, layerA, layerB));
230  BOOST_TEST(CheckOrder(graph, layerA, layerC));
231  BOOST_TEST(CheckOrder(graph, layerB, layerD));
232  BOOST_TEST(CheckOrder(graph, layerC, layerE));
233  BOOST_TEST(CheckOrder(graph, layerE, layerD));
234 
235 
236  // A
237  // |
238  // F
239  // / \'
240  // B C
241  // \ |
242  // \ E
243  // \ /
244  // D
245  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerA->GetOutputSlot(),
246  activationDefaults,
247  "layerF"));
248 
249  armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
250 
251  // Checks order is valid.
252  BOOST_TEST(CheckOrder(graph, layerA, layerF));
253  BOOST_TEST(CheckOrder(graph, layerF, layerB));
254  BOOST_TEST(CheckOrder(graph, layerF, layerC));
255  BOOST_TEST(CheckOrder(graph, layerB, layerD));
256  BOOST_TEST(CheckOrder(graph, layerC, layerE));
257  BOOST_TEST(CheckOrder(graph, layerE, layerD));
258 }
259 
namespace
{
// A directed edge of the graph: (source layer, destination layer).
// Used below to snapshot and compare graph connectivity.
using Edge = std::pair<const armnn::Layer*, const armnn::Layer*>;
}
264 
265 static std::vector<Edge> GetEdgeList(const armnn::Graph& graph)
266 {
267  std::vector<Edge> edges;
268 
269  for (auto&& srcLayer: graph)
270  {
271  const unsigned int numOutputSlots = srcLayer->GetNumOutputSlots();
272  for (unsigned int s = 0; s < numOutputSlots; ++s)
273  {
274  const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(s);
275  const unsigned int numConnections = outputSlot.GetNumConnections();
276  for (unsigned int c = 0; c < numConnections; ++c)
277  {
278  auto inputSlot = armnn::PolymorphicDowncast<const armnn::InputSlot*>(outputSlot.GetConnection(c));
279  edges.emplace_back(srcLayer, &inputSlot->GetOwningLayer());
280  }
281  }
282  }
283 
284  return edges;
285 }
286 
// Verifies that 'graph' is a valid transformation of 'origGraph' after copy
// (compatibility) layers have been added:
//  - no duplicate edges were introduced;
//  - every surviving original edge connects layers on the same backend;
//  - every new edge involves exactly one copy layer, and the layers it
//    bridges were directly connected (on different backends) in the original;
//  - every original edge is accounted for by the end.
// Layers are matched between the two graphs by name (the graphs hold
// distinct Layer objects, so pointers cannot be compared).
static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armnn::Graph& origGraph)
{
    std::vector<Edge> origEdges = GetEdgeList(origGraph);
    std::vector<Edge> newEdges = GetEdgeList(graph);

    // Adding copy layers should not produce any duplicate edges.
    {
        std::vector<Edge> sortedNewEdges = newEdges;
        std::sort(sortedNewEdges.begin(), sortedNewEdges.end());

        auto last = std::unique(sortedNewEdges.begin(), sortedNewEdges.end());
        BOOST_CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
    }

    // Each new edge must be tested.
    while (!newEdges.empty())
    {
        const Edge edge = std::move(newEdges.back());
        newEdges.pop_back();

        // Edge present in the original graph? Matched by layer name, not pointer.
        int originalEdge = -1;
        for (unsigned int i = 0; i < origEdges.size(); i++)
        {
            const Edge& origEdge = origEdges[i];
            if (origEdge.first->GetNameStr() == edge.first->GetNameStr() &&
                origEdge.second->GetNameStr() == edge.second->GetNameStr())
            {
                originalEdge = boost::numeric_cast<int>(i);
            }
        }

        if (originalEdge != -1)
        {
            // Each vertex should correspond to a layer.
            const armnn::Layer* srcLayer = edge.first;
            const armnn::Layer* dstLayer = edge.second;
            BOOST_TEST(srcLayer);
            BOOST_TEST(dstLayer);

            // Both layers must have the same compute device.
            if (srcLayer && dstLayer)
            {
                BOOST_TEST((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
            }

            // Marks edge in original graph as observed (by deleting it).
            origEdges.erase(origEdges.begin() + originalEdge);
        }
        else
        {
            // Edge did not exist in the original graph.
            // It must then be an edge connecting a layer and a copy layer.
            const armnn::Layer* srcLayer = edge.first;
            const armnn::Layer* dstLayer = edge.second;

            if (srcLayer == nullptr || dstLayer == nullptr)
            {
                BOOST_ERROR("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second << ") "
                            "introduced after adding copy layers to a graph "
                            "correspond to a layer not known to the graph");
                continue;
            }

            // One and only one of the two layers referenced by the edge should be present in the original graph.
            const bool srcLayerInOrigGraph = GraphHasNamedLayer(origGraph, srcLayer->GetNameStr());
            const bool dstLayerInOrigGraph = GraphHasNamedLayer(origGraph, dstLayer->GetNameStr());

            if (srcLayerInOrigGraph == dstLayerInOrigGraph)
            {
                BOOST_ERROR("A new edge ("
                            << edge.first->GetName()
                            << ", "
                            << edge.second->GetName()
                            << ") introduced after adding copy "
                            "layers to a graph is invalid. One of the ends should be present in the original "
                            "graph and the other should not, but "
                            << (srcLayerInOrigGraph ? "both are" : "none are"));
                continue;
            }

            const armnn::Layer* copyLayer = srcLayerInOrigGraph ? dstLayer : srcLayer;
            const armnn::Layer* nonCopyLayer = srcLayerInOrigGraph ? srcLayer : dstLayer;

            // Finds all edges connecting the copy layer to other layers.
            std::vector<Edge> adjEdges;
            auto it = newEdges.begin();
            while (it != newEdges.end())
            {
                Edge& newEdge = *it;
                if (copyLayer == (srcLayerInOrigGraph ? newEdge.first : newEdge.second))
                {
                    adjEdges.push_back(newEdge);

                    // Since the adjacent edge is immediately tested below, there is no need to consider it afterwards.
                    it = newEdges.erase(it);
                }
                else
                {
                    it++;
                }
            }

            if (adjEdges.empty())
            {
                BOOST_ERROR("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
                            edge.second << "), but no other edges connecting the copy layer '" << copyLayer->GetName()
                            << "' to other layers could be found");
                continue;
            }

            // Tests adjacent edges now.
            for (const Edge& adjEdge : adjEdges)
            {
                // The adjacent edge must connect the copy layer to another layer.
                const armnn::Layer* adjLayer = srcLayerInOrigGraph ? adjEdge.second : adjEdge.first;

                if (!adjLayer)
                {
                    BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second <<") is adjacent to an edge "
                                "connecting a layer and a copy layer, (" << edge.first << ", " << edge.second << "), "
                                "but the non-copy layer in the former does not correspond to a layer");
                    continue;
                }

                // Both layers must have different compute devices.
                // (Otherwise no copy layer would have been needed between them.)
                BOOST_TEST((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));

                // There must exist an edge connecting both layers directly in the original graph.
                {
                    const armnn::Layer* origEdgeSrc = srcLayerInOrigGraph ? nonCopyLayer : adjLayer;
                    const armnn::Layer* origEdgeDst = srcLayerInOrigGraph ? adjLayer : nonCopyLayer;

                    auto origEdgeIter = origEdges.begin();
                    for (; origEdgeIter != origEdges.end(); origEdgeIter++)
                    {
                        if (origEdgeIter->first->GetNameStr() == origEdgeSrc->GetNameStr() &&
                            origEdgeIter->second->GetNameStr() == origEdgeDst->GetNameStr())
                        {
                            break;
                        }
                    }

                    if (origEdgeIter != origEdges.end())
                    {
                        // Marks the bridged original edge as observed (by deleting it).
                        origEdges.erase(origEdgeIter);
                    }
                    else
                    {
                        BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an "
                                    "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
                                    "), but there is no edge connecting the layers in the original graph");
                    }
                }
            }
        }
    }

    BOOST_TEST(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
}
447 
// Test fixture building a graph whose layers are deliberately spread across
// CpuRef and CpuAcc backends, so that AddCompatibilityLayers() has
// cross-backend edges to insert copy layers into.
struct CopyLayersFixture
{
    CopyLayersFixture()
    {
    }

    // Builds: input -> conv1 -> {conv2, concat}; conv2 -> concat;
    // concat -> act -> softmax -> output, with mixed backend assignments.
    void InitialiseTestGraph()
    {
        using namespace armnn;
        using namespace std;

        Layer* const inputLayer = AddLayer<InputLayer>(0, "input");
        inputLayer->SetBackendId(Compute::CpuRef);

        Convolution2dDescriptor convolutionDefaults;
        Layer* const convLayer1 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv1");
        convLayer1->SetBackendId(Compute::CpuRef);

        inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));

        Layer* const convLayer2 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv2");
        convLayer2->SetBackendId(Compute::CpuAcc);

        convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));

        armnn::OriginsDescriptor concatDefaults(2);
        Layer* const concatLayer = AddLayer<ConcatLayer>(concatDefaults, "concat");
        concatLayer->SetBackendId(armnn::Compute::CpuRef);

        convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
        convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));

        armnn::ActivationDescriptor activationDefaults;
        // NOTE(review): no SetBackendId() call for actLayer is visible in this
        // source view — confirm against the original file which backend it uses.
        Layer* const actLayer = AddLayer<ActivationLayer>(activationDefaults, "act");

        concatLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0));

        armnn::SoftmaxDescriptor softmaxDefaults;
        Layer* const softmaxLayer = AddLayer<SoftmaxLayer>(softmaxDefaults, "softmax");
        softmaxLayer->SetBackendId(armnn::Compute::CpuRef);

        actLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));

        Layer* const outputLayer = AddLayer<OutputLayer>(0, "output");
        outputLayer->SetBackendId(armnn::Compute::CpuAcc);

        softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        // Set the memory strategies - for this test should be DirectCompatibility for same backends,
        // and CopyToTarget for different backends
        inputLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
        convLayer1->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
        convLayer1->GetOutputSlot(0).SetEdgeStrategy(1, EdgeStrategy::DirectCompatibility);
        convLayer2->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
        concatLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
        actLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
        softmaxLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
    }

    armnn::TensorInfo m_TensorDesc;  // Default-constructed; applied to every output slot by AddLayer().
    armnn::Graph m_Graph;            // The graph under test.
    std::map<armnn::BackendId, std::unique_ptr<armnn::IBackendInternal>> m_Backends;
    armnn::TensorHandleFactoryRegistry m_FactoryRegistry;

private:

    // Adds a layer to m_Graph and stamps m_TensorDesc onto all its output slots.
    template <typename LayerType, typename... Args>
    LayerType* AddLayer(Args&&... args)
    {
        LayerType* const layer = m_Graph.AddLayer<LayerType>(std::forward<Args>(args)...);

        for (auto slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
        {
            slot->SetTensorInfo(m_TensorDesc);
        }

        return layer;
    };
};
528 
529 BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
530 {
531  InitialiseTestGraph();
532  const armnn::Graph origGraph(m_Graph);
533  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
534 
535  TestGraphAfterAddingCopyLayers(m_Graph, origGraph);
536 }
537 
538 BOOST_FIXTURE_TEST_CASE(AddCopyLayersSeveralTimes, CopyLayersFixture)
539 {
540  InitialiseTestGraph();
541  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
542 
543  // Calling AddCompatibilityLayers() several times should not change the connections.
544  const std::vector<Edge> edges = GetEdgeList(m_Graph);
545  for (int i = 0; i < 4; ++i)
546  {
547  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
548  const std::vector<Edge> otherEdges = GetEdgeList(m_Graph);
549  BOOST_TEST((edges == otherEdges));
550  }
551 }
552 
// When two parallel edges connect the same pair of layers on different
// backends (splitter -> addition, twice), the two copy layers inserted for
// them must be distinct — checked here via edge-list uniqueness.
BOOST_FIXTURE_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames, CopyLayersFixture)
{
    armnn::Graph graph;

    // NOTE(review): some lines are missing from this source view (presumably
    // inputLayer backend and SetEdgeStrategy calls) — confirm against the original file.
    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");

    armnn::ViewsDescriptor splitterDesc(2);
    armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
    splitterLayer->SetBackendId(armnn::Compute::GpuAcc);

    armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
    additionLayer->SetBackendId(armnn::Compute::CpuRef);

    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
    outputLayer->SetBackendId(armnn::Compute::CpuRef);

    inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
    // Both splitter outputs feed the same addition layer - two parallel cross-backend edges.
    splitterLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
    splitterLayer->GetOutputSlot(1).Connect(additionLayer->GetInputSlot(1));
    additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);

    // 4 original edges + 2 copy layers each contributing an extra edge = 6, all unique.
    std::vector<Edge> edges = GetEdgeList(graph);
    BOOST_CHECK(edges.size() == 6u);
    std::sort(edges.begin(), edges.end());
    auto last = std::unique(edges.begin(), edges.end());
    BOOST_CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCompatibilityLayers()");
}
588 
// Two layers may share the same name ("layer"); the graph must still build
// and sort topologically (input before output).
BOOST_AUTO_TEST_CASE(DuplicateLayerNames)
{
    armnn::Graph graph;

    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "layer");

    // Deliberately reuses the name "layer".
    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "layer");
    outputLayer->SetBackendId(armnn::Compute::CpuRef);

    inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    auto it = graph.TopologicalSort().begin();
    BOOST_TEST(((*it)->GetType() == armnn::LayerType::Input));
    BOOST_TEST(((*std::next(it))->GetType() == armnn::LayerType::Output));
}
605 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:159
This layer represents a split operation.
A ViewsDescriptor for the SplitterLayer.
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:178
No strategy has been defined. Used internally to verify integrity of optimizations.
CPU Execution: Reference C++ kernels.
armnn::Layer * GetFirstLayerWithName(armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:22
bool CheckOrder(const armnn::Graph &graph, const armnn::Layer *first, const armnn::Layer *second)
Checks that first comes before second in the order.
Definition: GraphTests.cpp:23
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:398
A Convolution2dDescriptor for the Convolution2dLayer.
Source backends tensor data can be exported to destination backend tensor without copy...
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
bool GraphHasNamedLayer(const armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:10
This layer represents an activation operation with the specified activation function.
Copyright (c) 2020 ARM Limited.
void SetBackendId(const BackendId &id)
Definition: Layer.hpp:264
virtual const IInputSlot * GetConnection(unsigned int index) const =0
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
An output connection slot for a layer.
Definition: INetwork.hpp:37
An OriginsDescriptor for the ConcatLayer.
const std::string & GetNameStr() const
Definition: Layer.hpp:216
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
GPU Execution: OpenCL: ArmCompute.
BOOST_AUTO_TEST_CASE(ClassGraph)
Definition: GraphTests.cpp:37
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
const BackendId & GetBackendId() const
Definition: Layer.hpp:263
This layer represents an addition operation.
BOOST_AUTO_TEST_SUITE_END()
CPU Execution: NEON: ArmCompute.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
virtual unsigned int GetNumConnections() const =0
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:305
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:174
LayerT * InsertNewLayer(InputSlot &insertBefore, Args &&... args)
Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itse...
Definition: Graph.hpp:410
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:262
Status Print() const
Definition: Graph.cpp:60
BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
Definition: GraphTests.cpp:529