ArmNN
 21.08
CompatibilityTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #if defined(ARMCOMPUTECL_ENABLED)
7 #include <cl/ClBackend.hpp>
8 #endif
9 #if defined(ARMCOMPUTENEON_ENABLED)
10 #include <neon/NeonBackend.hpp>
11 #endif
12 #include <reference/RefBackend.hpp>
13 #include <armnn/BackendHelper.hpp>
14 
15 #include <Network.hpp>
16 
17 #include <doctest/doctest.h>
18 
19 #include <vector>
20 #include <string>
21 
22 using namespace armnn;
23 
24 #if defined(ARMCOMPUTENEON_ENABLED) && defined(ARMCOMPUTECL_ENABLED)
25 
26 TEST_SUITE("BackendsCompatibility")
27 {
28 // Partially disabled Test Suite
29 TEST_CASE("Neon_Cl_DirectCompatibility_Test")
30 {
31  auto neonBackend = std::make_unique<NeonBackend>();
32  auto clBackend = std::make_unique<ClBackend>();
33 
35  neonBackend->RegisterTensorHandleFactories(registry);
36  clBackend->RegisterTensorHandleFactories(registry);
37 
38  const BackendId& neonBackendId = neonBackend->GetId();
39  const BackendId& clBackendId = clBackend->GetId();
40 
41  BackendsMap backends;
42  backends[neonBackendId] = std::move(neonBackend);
43  backends[clBackendId] = std::move(clBackend);
44 
45  armnn::Graph graph;
46 
47  armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
48 
49  inputLayer->SetBackendId(neonBackendId);
50 
52  armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
53  softmaxLayer1->SetBackendId(clBackendId);
54 
55  armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
56  softmaxLayer2->SetBackendId(neonBackendId);
57 
58  armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
59  softmaxLayer3->SetBackendId(clBackendId);
60 
61  armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
62  softmaxLayer4->SetBackendId(neonBackendId);
63 
64  armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
65  outputLayer->SetBackendId(clBackendId);
66 
67  inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
68  softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
69  softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
70  softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
71  softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
72 
73  graph.TopologicalSort();
74 
75  std::vector<std::string> errors;
76  auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
77 
78  CHECK(result.m_Error == false);
79  CHECK(result.m_Warning == false);
80 
81  // OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
82  // OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
83  // OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
84  // OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
85  // OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
86 
87  // // Check that the correct factory was selected
88  // CHECK(inputLayerOut.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
89  // CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
90  // CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
91  // CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
92  // CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
93 
94  // // Check that the correct strategy was selected
95  // CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
96  // CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
97  // CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
98  // CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
99  // CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
100 
101  graph.AddCompatibilityLayers(backends, registry);
102 
103  // Test for copy layers
104  int copyCount= 0;
105  graph.ForEachLayer([&copyCount](Layer* layer)
106  {
107  if (layer->GetType() == LayerType::MemCopy)
108  {
109  copyCount++;
110  }
111  });
112  // CHECK(copyCount == 0);
113 
114  // Test for import layers
115  int importCount= 0;
116  graph.ForEachLayer([&importCount](Layer *layer)
117  {
118  if (layer->GetType() == LayerType::MemImport)
119  {
120  importCount++;
121  }
122  });
123  // CHECK(importCount == 0);
124 }
125 
126 }
127 #endif
128 
129 TEST_SUITE("BackendCapability")
130 {
131 #if defined(ARMNNREF_ENABLED)
132 
133 TEST_CASE("Ref_Backends_Capability_Test")
134 {
135  auto refBackend = std::make_unique<RefBackend>();
136  auto refCapabilities = refBackend->GetCapabilities();
137 
138  CHECK(armnn::HasCapability("NonConstWeights", refCapabilities));
139  CHECK(armnn::HasCapability("AsyncExecution", refCapabilities));
140 
141  armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", true};
143 
144  CHECK(armnn::HasCapability(nonConstWeights, refCapabilities));
145  CHECK(armnn::HasCapability(AsyncExecution, refCapabilities));
146 }
147 
148 TEST_CASE("Ref_Backends_Unkown_Capability_Test")
149 {
150  auto refBackend = std::make_unique<RefBackend>();
151  auto refCapabilities = refBackend->GetCapabilities();
152 
153  armnn::BackendOptions::BackendOption AsyncExecutionFalse{"AsyncExecution", false};
154  CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
155 
156  armnn::BackendOptions::BackendOption AsyncExecutionInt{"AsyncExecution", 50};
157  CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
158 
159  armnn::BackendOptions::BackendOption AsyncExecutionFloat{"AsyncExecution", 0.0f};
160  CHECK(!armnn::HasCapability(AsyncExecutionFloat, refCapabilities));
161 
162  armnn::BackendOptions::BackendOption AsyncExecutionString{"AsyncExecution", "true"};
163  CHECK(!armnn::HasCapability(AsyncExecutionString, refCapabilities));
164 
165  CHECK(!armnn::HasCapability("Telekinesis", refCapabilities));
166  armnn::BackendOptions::BackendOption unkownCapability{"Telekinesis", true};
167  CHECK(!armnn::HasCapability(unkownCapability, refCapabilities));
168 }
169 
170 #endif
171 
172 #if defined(ARMCOMPUTENEON_ENABLED)
173 
174 TEST_CASE("Neon_Backends_Capability_Test")
175 {
176  auto neonBackend = std::make_unique<NeonBackend>();
177  auto neonCapabilities = neonBackend->GetCapabilities();
178 
179  CHECK(armnn::HasCapability("NonConstWeights", neonCapabilities));
180  CHECK(armnn::HasCapability("AsyncExecution", neonCapabilities));
181 
182  armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", false};
183  armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", false};
184 
185  CHECK(armnn::HasCapability(nonConstWeights, neonCapabilities));
186  CHECK(armnn::HasCapability(AsyncExecution, neonCapabilities));
187 }
188 
189 #endif
190 
191 #if defined(ARMCOMPUTECL_ENABLED)
192 
193 TEST_CASE("Cl_Backends_Capability_Test")
194 {
195  auto clBackend = std::make_unique<ClBackend>();
196  auto clCapabilities = clBackend->GetCapabilities();
197 
198  CHECK(armnn::HasCapability("NonConstWeights", clCapabilities));
199  CHECK(armnn::HasCapability("AsyncExecution", clCapabilities));
200 
201  armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", false};
202  armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", false};
203 
204  CHECK(armnn::HasCapability(nonConstWeights, clCapabilities));
205  CHECK(armnn::HasCapability(AsyncExecution, clCapabilities));
206 }
207 
208 #endif
209 
210 }
TEST_SUITE("TestConstTensorLayerVisitor")
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilities struct.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
Definition: Layer.hpp:270
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:316
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
void ForEachLayer(Func func) const
Definition: Graph.hpp:39
This layer represents a softmax operation.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:265
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
OptimizationResult SelectTensorHandleStrategy(Graph &optGraph, BackendsMap &backends, TensorHandleFactoryRegistry &registry, bool importEnabled, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:1545
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:177
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:300
std::map< BackendId, std::unique_ptr< class IBackendInternal > > BackendsMap
Definition: Network.hpp:313