ArmNN 20.02 — source listing of GraphTests.cpp (unit tests for armnn::Graph).
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "GraphUtils.hpp"
6 
7 #include <Graph.hpp>
8 #include <Layer.hpp>
9 
10 #include <armnn/TypesUtils.hpp>
11 #include <armnn/Exceptions.hpp>
12 
14 
17 
18 #include <boost/cast.hpp>
19 #include <boost/test/unit_test.hpp>
20 
21 /// Checks that first comes before second in the order.
22 bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second)
23 {
24  graph.Print();
25 
26  const auto& order = graph.TopologicalSort();
27 
28  auto firstPos = std::find(order.begin(), order.end(), first);
29  auto secondPos = std::find(firstPos, order.end(), second);
30 
31  return (secondPos != order.end());
32 }
33 
35 
37 {
38  armnn::Graph graph;
39  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
40  BOOST_TEST(GraphHasNamedLayer(graph, "layerA"));
41 }
42 
43 BOOST_AUTO_TEST_CASE(TopologicalSort)
44 {
45  armnn::Graph graph;
46 
47  armnn::ActivationDescriptor activationDefaults;
48 
49  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
50  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
51  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
52  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
53  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
54  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
55 
56  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
57  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
58  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
59  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
60  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
61  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
62 
63  // Simple graph which branches and rejoins.
64  // A
65  // / \'
66  // D E
67  // \ |
68  // \ B
69  // \|
70  // C
71  layerA->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
72  layerA->GetOutputSlot(0).Connect(layerE->GetInputSlot(0));
73  layerE->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
74  layerD->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
75  layerB->GetOutputSlot(0).Connect(layerC->GetInputSlot(1));
76  layerC->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
77 
78  // check order is valid
79  BOOST_TEST(CheckOrder(graph, layerA, layerD));
80  BOOST_TEST(CheckOrder(graph, layerA, layerE));
81  BOOST_TEST(CheckOrder(graph, layerD, layerC));
82  BOOST_TEST(CheckOrder(graph, layerE, layerB));
83  BOOST_TEST(CheckOrder(graph, layerB, layerC));
84 }
85 
86 BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
87 {
88  armnn::Graph graph;
89  armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
90 
91  std::vector<armnn::Layer*> order;
92 
93  armnn::ActivationDescriptor activationDefaults;
94  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
95  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
96  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
97  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
98  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
99 
100  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
101  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
102  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
103  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
104  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
105 
106  // A
107  // / \'
108  // B C
109  // \ /
110  // D
111  layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo);
112  layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo);
113  layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo);
114  layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo);
115 
116  layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
117  layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
118  layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
119  layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1));
120  layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
121 
122  // Checks order is valid.
123  BOOST_TEST(CheckOrder(graph, layerA, layerB));
124  BOOST_TEST(CheckOrder(graph, layerA, layerC));
125  BOOST_TEST(CheckOrder(graph, layerB, layerD));
126  BOOST_TEST(CheckOrder(graph, layerC, layerD));
127 
128  // A
129  // / \'
130  // B C
131  // \ |
132  // \ E
133  // \|
134  // D
135  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
136  activationDefaults,
137  "layerE"));
138 
139  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
140 
141  // Checks order is valid.
142  BOOST_TEST(CheckOrder(graph, layerA, layerB));
143  BOOST_TEST(CheckOrder(graph, layerA, layerC));
144  BOOST_TEST(CheckOrder(graph, layerB, layerD));
145  BOOST_TEST(CheckOrder(graph, layerC, layerE));
146  BOOST_TEST(CheckOrder(graph, layerE, layerD));
147 
148  // A
149  // /|
150  // / F
151  // / |
152  // B C
153  // \ |
154  // \ E
155  // \|
156  // D
157  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
158  activationDefaults,
159  "layerF"));
160 
161  armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
162 
163  // Checks order is valid.
164  BOOST_TEST(CheckOrder(graph, layerA, layerB));
165  BOOST_TEST(CheckOrder(graph, layerA, layerF));
166  BOOST_TEST(CheckOrder(graph, layerF, layerC));
167  BOOST_TEST(CheckOrder(graph, layerB, layerD));
168  BOOST_TEST(CheckOrder(graph, layerC, layerE));
169  BOOST_TEST(CheckOrder(graph, layerE, layerD));
170 }
171 
172 BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
173 {
174  armnn::Graph graph;
175  armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
176 
177  std::vector<armnn::Layer*> order;
178 
179  armnn::ActivationDescriptor activationDefaults;
180  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
181  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
182  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
183  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
184  BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
185 
186  armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
187  armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
188  armnn::Layer* const layerC = GetFirstLayerWithName(graph, "layerC");
189  armnn::Layer* const layerD = GetFirstLayerWithName(graph, "layerD");
190  armnn::Layer* const layerO = GetFirstLayerWithName(graph, "output");
191 
192  // A
193  // / \'
194  // B C
195  // \ /
196  // D
197  layerA->GetOutputSlot(0).SetTensorInfo(tensorInfo);
198  layerB->GetOutputSlot(0).SetTensorInfo(tensorInfo);
199  layerC->GetOutputSlot(0).SetTensorInfo(tensorInfo);
200  layerD->GetOutputSlot(0).SetTensorInfo(tensorInfo);
201 
202  layerA->GetOutputSlot(0).Connect(layerB->GetInputSlot(0));
203  layerA->GetOutputSlot(0).Connect(layerC->GetInputSlot(0));
204  layerB->GetOutputSlot(0).Connect(layerD->GetInputSlot(0));
205  layerC->GetOutputSlot(0).Connect(layerD->GetInputSlot(1));
206  layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
207 
208  // Checks order is valid.
209  BOOST_TEST(CheckOrder(graph, layerA, layerB));
210  BOOST_TEST(CheckOrder(graph, layerA, layerC));
211  BOOST_TEST(CheckOrder(graph, layerB, layerD));
212  BOOST_TEST(CheckOrder(graph, layerC, layerD));
213 
214  // A
215  // / \'
216  // B C
217  // \ |
218  // \ E
219  // \|
220  // D
221  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetOutputSlot(),
222  activationDefaults,
223  "layerE"));
224 
225  armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
226 
227  // Checks order is valid.
228  BOOST_TEST(CheckOrder(graph, layerA, layerB));
229  BOOST_TEST(CheckOrder(graph, layerA, layerC));
230  BOOST_TEST(CheckOrder(graph, layerB, layerD));
231  BOOST_TEST(CheckOrder(graph, layerC, layerE));
232  BOOST_TEST(CheckOrder(graph, layerE, layerD));
233 
234 
235  // A
236  // |
237  // F
238  // / \'
239  // B C
240  // \ |
241  // \ E
242  // \ /
243  // D
244  BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerA->GetOutputSlot(),
245  activationDefaults,
246  "layerF"));
247 
248  armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
249 
250  // Checks order is valid.
251  BOOST_TEST(CheckOrder(graph, layerA, layerF));
252  BOOST_TEST(CheckOrder(graph, layerF, layerB));
253  BOOST_TEST(CheckOrder(graph, layerF, layerC));
254  BOOST_TEST(CheckOrder(graph, layerB, layerD));
255  BOOST_TEST(CheckOrder(graph, layerC, layerE));
256  BOOST_TEST(CheckOrder(graph, layerE, layerD));
257 }
258 
259 namespace
260 {
 // A directed edge of a graph, expressed as (source layer, destination layer).
261  using Edge = std::pair<const armnn::Layer*, const armnn::Layer*>;
262 }
263 
264 static std::vector<Edge> GetEdgeList(const armnn::Graph& graph)
265 {
266  std::vector<Edge> edges;
267 
268  for (auto&& srcLayer: graph)
269  {
270  const unsigned int numOutputSlots = srcLayer->GetNumOutputSlots();
271  for (unsigned int s = 0; s < numOutputSlots; ++s)
272  {
273  const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(s);
274  const unsigned int numConnections = outputSlot.GetNumConnections();
275  for (unsigned int c = 0; c < numConnections; ++c)
276  {
277  auto inputSlot = boost::polymorphic_downcast<const armnn::InputSlot*>(outputSlot.GetConnection(c));
278  edges.emplace_back(srcLayer, &inputSlot->GetOwningLayer());
279  }
280  }
281  }
282 
283  return edges;
284 }
285 
/// Verifies the state of 'graph' after AddCompatibilityLayers() ran, against the pristine
/// 'origGraph'. Checks, matching layers between the two graphs by name:
///  - no duplicate edges exist in the new graph;
///  - every edge that already existed in the original graph connects layers with the same
///    backend;
///  - every brand-new edge attaches exactly one original layer to a copy layer, whose
///    adjacent edges correspond to a cross-backend edge of the original graph;
///  - every original edge is accounted for (directly or via a copy layer) in the new graph.
286 static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armnn::Graph& origGraph)
287 {
288  std::vector<Edge> origEdges = GetEdgeList(origGraph);
289  std::vector<Edge> newEdges = GetEdgeList(graph);
290 
291  // Adding copy layers should not produce any duplicate edges.
292  {
293  std::vector<Edge> sortedNewEdges = newEdges;
294  std::sort(sortedNewEdges.begin(), sortedNewEdges.end());
295 
296  auto last = std::unique(sortedNewEdges.begin(), sortedNewEdges.end());
297  BOOST_CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
298  }
299 
300  // Each new edge must be tested.
301  while (!newEdges.empty())
302  {
303  const Edge edge = std::move(newEdges.back());
304  newEdges.pop_back();
305 
306  // Edge present in the original graph?
 // Linear scan by layer name; the graphs in these tests are tiny, so O(n*m) is fine.
307  int originalEdge = -1;
308  for (unsigned int i = 0; i < origEdges.size(); i++)
309  {
310  const Edge& origEdge = origEdges[i];
311  if (origEdge.first->GetNameStr() == edge.first->GetNameStr() &&
312  origEdge.second->GetNameStr() == edge.second->GetNameStr())
313  {
314  originalEdge = boost::numeric_cast<int>(i);
315  }
316  }
317 
318  if (originalEdge != -1)
319  {
320  // Each vertex should correspond to a layer.
321  const armnn::Layer* srcLayer = edge.first;
322  const armnn::Layer* dstLayer = edge.second;
323  BOOST_TEST(srcLayer);
324  BOOST_TEST(dstLayer);
325 
326  // Both layers must have the same compute device.
327  if (srcLayer && dstLayer)
328  {
329  BOOST_TEST((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
330  }
331 
332  // Marks edge in original graph as observed (by deleting it).
333  origEdges.erase(origEdges.begin() + originalEdge);
334  }
335  else
336  {
337  // Edge did not exist in the original graph.
338  // It must then be an edge connecting a layer and a copy layer.
339  const armnn::Layer* srcLayer = edge.first;
340  const armnn::Layer* dstLayer = edge.second;
341 
342  if (srcLayer == nullptr || dstLayer == nullptr)
343  {
344  BOOST_ERROR("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second << ") "
345  "introduced after adding copy layers to a graph "
346  "correspond to a layer not known to the graph");
347  continue;
348  }
349 
350  // One and only one of the two layers referenced by the edge should be present in the original graph.
351  const bool srcLayerInOrigGraph = GraphHasNamedLayer(origGraph, srcLayer->GetNameStr());
352  const bool dstLayerInOrigGraph = GraphHasNamedLayer(origGraph, dstLayer->GetNameStr());
353 
354  if (srcLayerInOrigGraph == dstLayerInOrigGraph)
355  {
356  BOOST_ERROR("A new edge ("
357  << edge.first->GetName()
358  << ", "
359  << edge.second->GetName()
360  << ") introduced after adding copy "
361  "layers to a graph is invalid. One of the ends should be present in the original "
362  "graph and the other should not, but "
363  << (srcLayerInOrigGraph ? "both are" : "none are"));
364  continue;
365  }
366 
 // If the source end is the original layer, the copy layer is downstream of it,
 // and vice versa.
367  const armnn::Layer* copyLayer = srcLayerInOrigGraph ? dstLayer : srcLayer;
368  const armnn::Layer* nonCopyLayer = srcLayerInOrigGraph ? srcLayer : dstLayer;
369 
370  // Finds all edges connecting the copy layer to other layers.
371  std::vector<Edge> adjEdges;
372  auto it = newEdges.begin();
373  while (it != newEdges.end())
374  {
375  Edge& newEdge = *it;
376  if (copyLayer == (srcLayerInOrigGraph ? newEdge.first : newEdge.second))
377  {
378  adjEdges.push_back(newEdge);
379 
380  // Since the adjacent edge is immediately tested below, there is no need to consider it afterwards.
381  it = newEdges.erase(it);
382  }
383  else
384  {
385  it++;
386  }
387  }
388 
389  if (adjEdges.empty())
390  {
391  BOOST_ERROR("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
392  edge.second << "), but no other edges connecting the copy layer '" << copyLayer->GetName()
393  << "' to other layers could be found");
394  continue;
395  }
396 
397  // Tests adjacent edges now.
398  for (const Edge& adjEdge : adjEdges)
399  {
400  // The adjacent edge must connect the copy layer to another layer.
401  const armnn::Layer* adjLayer = srcLayerInOrigGraph ? adjEdge.second : adjEdge.first;
402 
403  if (!adjLayer)
404  {
405  BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second <<") is adjacent to an edge "
406  "connecting a layer and a copy layer, (" << edge.first << ", " << edge.second << "), "
407  "but the non-copy layer in the former does not correspond to a layer");
408  continue;
409  }
410 
411  // Both layers must have different compute devices.
412  BOOST_TEST((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));
413 
414  // There must exist an edge connecting both layers directly in the original graph.
415  {
416  const armnn::Layer* origEdgeSrc = srcLayerInOrigGraph ? nonCopyLayer : adjLayer;
417  const armnn::Layer* origEdgeDst = srcLayerInOrigGraph ? adjLayer : nonCopyLayer;
418 
419  auto origEdgeIter = origEdges.begin();
420  for (; origEdgeIter != origEdges.end(); origEdgeIter++)
421  {
422  if (origEdgeIter->first->GetNameStr() == origEdgeSrc->GetNameStr() &&
423  origEdgeIter->second->GetNameStr() == origEdgeDst->GetNameStr())
424  {
425  break;
426  }
427  }
428 
429  if (origEdgeIter != origEdges.end())
430  {
431  origEdges.erase(origEdgeIter);
432  }
433  else
434  {
435  BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an "
436  "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
437  "), but there is no edge connecting the layers in the original graph");
438  }
439  }
440  }
441  }
442  }
443 
444  BOOST_TEST(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
445 }
446 
447 struct CopyLayersFixture
448 {
449  CopyLayersFixture()
450  {
451  }
452 
453  void InitialiseTestGraph()
454  {
455  using namespace armnn;
456  using namespace std;
457 
458  Layer* const inputLayer = AddLayer<InputLayer>(0, "input");
459  inputLayer->SetBackendId(Compute::CpuRef);
460 
461  Convolution2dDescriptor convolutionDefaults;
462  Layer* const convLayer1 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv1");
463  convLayer1->SetBackendId(Compute::CpuRef);
464 
465  inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
466 
467  Layer* const convLayer2 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv2");
468  convLayer2->SetBackendId(Compute::CpuAcc);
469 
470  convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
471 
472  armnn::OriginsDescriptor concatDefaults(2);
473  Layer* const concatLayer = AddLayer<ConcatLayer>(concatDefaults, "concat");
474  concatLayer->SetBackendId(armnn::Compute::CpuRef);
475 
476  convLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
477  convLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
478 
479  armnn::ActivationDescriptor activationDefaults;
480  Layer* const actLayer = AddLayer<ActivationLayer>(activationDefaults, "act");
482 
483  concatLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0));
484 
485  armnn::SoftmaxDescriptor softmaxDefaults;
486  Layer* const softmaxLayer = AddLayer<SoftmaxLayer>(softmaxDefaults, "softmax");
487  softmaxLayer->SetBackendId(armnn::Compute::CpuRef);
488 
489  actLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
490 
491  Layer* const outputLayer = AddLayer<OutputLayer>(0, "output");
492  outputLayer->SetBackendId(armnn::Compute::CpuAcc);
493 
494  softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
495 
496  // Set the memory strategies - for this test should be DirectCompatibility for same backends,
497  // and CopyToTarget for different backends
498  inputLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
499  convLayer1->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
500  convLayer1->GetOutputSlot(0).SetEdgeStrategy(1, EdgeStrategy::DirectCompatibility);
501  convLayer2->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
502  concatLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
503  actLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::DirectCompatibility);
504  softmaxLayer->GetOutputSlot(0).SetEdgeStrategy(0, EdgeStrategy::CopyToTarget);
505  }
506 
507  armnn::TensorInfo m_TensorDesc;
508  armnn::Graph m_Graph;
509  std::map<armnn::BackendId, std::unique_ptr<armnn::IBackendInternal>> m_Backends;
510  armnn::TensorHandleFactoryRegistry m_FactoryRegistry;
511 
512 private:
513 
514  template <typename LayerType, typename... Args>
515  LayerType* AddLayer(Args&&... args)
516  {
517  LayerType* const layer = m_Graph.AddLayer<LayerType>(std::forward<Args>(args)...);
518 
519  for (auto slot = layer->BeginOutputSlots(); slot != layer->EndOutputSlots(); ++slot)
520  {
521  slot->SetTensorInfo(m_TensorDesc);
522  }
523 
524  return layer;
525  };
526 };
527 
528 BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
529 {
530  InitialiseTestGraph();
531  const armnn::Graph origGraph(m_Graph);
532  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
533 
534  TestGraphAfterAddingCopyLayers(m_Graph, origGraph);
535 }
536 
537 BOOST_FIXTURE_TEST_CASE(AddCopyLayersSeveralTimes, CopyLayersFixture)
538 {
539  InitialiseTestGraph();
540  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
541 
542  // Calling AddCompatibilityLayers() several times should not change the connections.
543  const std::vector<Edge> edges = GetEdgeList(m_Graph);
544  for (int i = 0; i < 4; ++i)
545  {
546  m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
547  const std::vector<Edge> otherEdges = GetEdgeList(m_Graph);
548  BOOST_TEST((edges == otherEdges));
549  }
550 }
551 
552 BOOST_FIXTURE_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames, CopyLayersFixture)
553 {
554  armnn::Graph graph;
555 
556  armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
558 
559  armnn::ViewsDescriptor splitterDesc(2);
560  armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
561  splitterLayer->SetBackendId(armnn::Compute::GpuAcc);
562 
563  armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
564  additionLayer->SetBackendId(armnn::Compute::CpuRef);
565 
566  armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
567  outputLayer->SetBackendId(armnn::Compute::CpuRef);
568 
569  inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
570  splitterLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
571  splitterLayer->GetOutputSlot(1).Connect(additionLayer->GetInputSlot(1));
572  additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
573 
578 
579  graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
580 
581  std::vector<Edge> edges = GetEdgeList(graph);
582  BOOST_CHECK(edges.size() == 6u);
583  std::sort(edges.begin(), edges.end());
584  auto last = std::unique(edges.begin(), edges.end());
585  BOOST_CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCompatibilityLayers()");
586 }
587 
588 BOOST_AUTO_TEST_CASE(DuplicateLayerNames)
589 {
590  armnn::Graph graph;
591 
592  armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "layer");
594 
595  armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "layer");
596  outputLayer->SetBackendId(armnn::Compute::CpuRef);
597 
598  inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
599 
600  auto it = graph.TopologicalSort().begin();
601  BOOST_TEST(((*it)->GetType() == armnn::LayerType::Input));
602  BOOST_TEST(((*std::next(it))->GetType() == armnn::LayerType::Output));
603 }
604 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
Iterator begin()
Returns iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:158
This layer represents a split operation.
A ViewsDescriptor for the SplitterLayer.
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
Definition: Layer.cpp:177
No strategy has been defined. Used internally to verify integrity of optimizations.
CPU Execution: Reference C++ kernels.
armnn::Layer * GetFirstLayerWithName(armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:20
bool CheckOrder(const armnn::Graph &graph, const armnn::Layer *first, const armnn::Layer *second)
Checks that first comes before second in the order.
Definition: GraphTests.cpp:22
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:397
A Convolution2dDescriptor for the Convolution2dLayer.
Source backends tensor data can be exported to destination backend tensor without copy...
int Connect(InputSlot &destination)
Definition: Layer.cpp:79
bool GraphHasNamedLayer(const armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:8
This layer represents an activation operation with the specified activation function.
Copyright (c) 2020 ARM Limited.
void SetBackendId(const BackendId &id)
Definition: Layer.hpp:264
virtual const IInputSlot * GetConnection(unsigned int index) const =0
BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::WaitingForAck)
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:310
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
An output connection slot for a layer.
Definition: INetwork.hpp:37
An OriginsDescriptor for the ConcatLayer.
const std::string & GetNameStr() const
Definition: Layer.hpp:216
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
GPU Execution: OpenCL: ArmCompute.
BOOST_AUTO_TEST_CASE(ClassGraph)
Definition: GraphTests.cpp:36
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
const BackendId & GetBackendId() const
Definition: Layer.hpp:263
This layer represents an addition operation.
BOOST_AUTO_TEST_SUITE_END()
CPU Execution: NEON: ArmCompute.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:312
virtual unsigned int GetNumConnections() const =0
const char * GetName() const override
Returns the name of the layer.
Definition: Layer.hpp:305
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:173
LayerT * InsertNewLayer(InputSlot &insertBefore, Args &&... args)
Inserts a new layer between the output slot currently connected to insertBefore and insertBefore itse...
Definition: Graph.hpp:409
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:263
Status Print() const
Definition: Graph.cpp:61
BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
Definition: GraphTests.cpp:528