ArmNN 21.05 — CompatibilityTests.cpp
Go to the documentation of this file for the annotated (cross-referenced) version.
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

6 #include <boost/test/unit_test.hpp>
7 
8 #include <cl/ClBackend.hpp>
9 #include <neon/NeonBackend.hpp>
10 #include <reference/RefBackend.hpp>
11 
12 #include <Network.hpp>
13 
14 #include <vector>
15 #include <string>
16 
17 using namespace armnn;
18 
19 BOOST_AUTO_TEST_SUITE(BackendsCompatibility, * boost::unit_test::disabled())
20 
21 BOOST_AUTO_TEST_CASE(Neon_Cl_DirectCompatibility_Test)
22 {
23  auto neonBackend = std::make_unique<NeonBackend>();
24  auto clBackend = std::make_unique<ClBackend>();
25 
27  neonBackend->RegisterTensorHandleFactories(registry);
28  clBackend->RegisterTensorHandleFactories(registry);
29 
30  const BackendId& neonBackendId = neonBackend->GetId();
31  const BackendId& clBackendId = clBackend->GetId();
32 
33  BackendsMap backends;
34  backends[neonBackendId] = std::move(neonBackend);
35  backends[clBackendId] = std::move(clBackend);
36 
37  armnn::Graph graph;
38 
39  armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
40 
41  inputLayer->SetBackendId(neonBackendId);
42 
44  armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
45  softmaxLayer1->SetBackendId(clBackendId);
46 
47  armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
48  softmaxLayer2->SetBackendId(neonBackendId);
49 
50  armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
51  softmaxLayer3->SetBackendId(clBackendId);
52 
53  armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
54  softmaxLayer4->SetBackendId(neonBackendId);
55 
56  armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
57  outputLayer->SetBackendId(clBackendId);
58 
59  inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
60  softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
61  softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
62  softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
63  softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
64 
65  graph.TopologicalSort();
66 
67  std::vector<std::string> errors;
68  auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
69 
70  BOOST_TEST(result.m_Error == false);
71  BOOST_TEST(result.m_Warning == false);
72 
73  OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
74  OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
75  OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
76  OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
77  OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
78 
79  // Check that the correct factory was selected
80  BOOST_TEST(inputLayerOut.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
81  BOOST_TEST(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
82  BOOST_TEST(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
83  BOOST_TEST(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
84  BOOST_TEST(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
85 
86  // Check that the correct strategy was selected
87  BOOST_TEST((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
88  BOOST_TEST((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
89  BOOST_TEST((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
90  BOOST_TEST((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
91  BOOST_TEST((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
92 
93  graph.AddCompatibilityLayers(backends, registry);
94 
95  // Test for copy layers
96  int copyCount= 0;
97  graph.ForEachLayer([&copyCount](Layer* layer)
98  {
99  if (layer->GetType() == LayerType::MemCopy)
100  {
101  copyCount++;
102  }
103  });
104  BOOST_TEST(copyCount == 0);
105 
106  // Test for import layers
107  int importCount= 0;
108  graph.ForEachLayer([&importCount](Layer *layer)
109  {
110  if (layer->GetType() == LayerType::MemImport)
111  {
112  importCount++;
113  }
114  });
115  BOOST_TEST(importCount == 0);
116 }
117 
119 
121 
122 #if defined(ARMNNREF_ENABLED)
123 
124 BOOST_AUTO_TEST_CASE(Ref_Backends_Capability_Test)
125 {
126  auto refBackend = std::make_unique<RefBackend>();
127  BOOST_CHECK(refBackend->HasCapability(armnn::BackendCapability::NonConstWeights));
128 
129  BOOST_CHECK(!refBackend->HasCapability(armnn::BackendCapability::AsyncExecution));
130 }
131 
132 #endif
133 
134 #if defined(ARMCOMPUTENEON_ENABLED)
135 
136 BOOST_AUTO_TEST_CASE(Neon_Backends_Capability_Test)
137 {
138  auto neonBackend = std::make_unique<NeonBackend>();
139  BOOST_CHECK(!neonBackend->HasCapability(armnn::BackendCapability::NonConstWeights));
140 }
141 
142 #endif
143 
144 #if defined(ARMCOMPUTECL_ENABLED)
145 
146 BOOST_AUTO_TEST_CASE(Cl_Backends_Capability_Test)
147 {
148  auto clBackend = std::make_unique<ClBackend>();
149  BOOST_CHECK(!clBackend->HasCapability(armnn::BackendCapability::NonConstWeights));
150 }
151 
152 #endif
153 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
No strategy has been defined. Used internally to verify integrity of optimizations.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
Definition: Layer.cpp:186
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
Definition: Layer.hpp:270
Constant weights can be accessed through the descriptors, On the other hand, non-const weights can be...
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer that user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
void ForEachLayer(Func func) const
Definition: Graph.hpp:39
BackendCapability
BackendCapability class.
Definition: Types.hpp:220
This layer represents a softmax operation.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:265
BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
BOOST_AUTO_TEST_SUITE_END()
A layer that user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
OptimizationResult SelectTensorHandleStrategy(Graph &optGraph, BackendsMap &backends, TensorHandleFactoryRegistry &registry, bool importEnabled, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:1500
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
Definition: Layer.cpp:176
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:177
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:300
std::map< BackendId, std::unique_ptr< class IBackendInternal > > BackendsMap
Definition: Network.hpp:317