ArmNN
 22.05
CompatibilityTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #if defined(ARMCOMPUTECL_ENABLED)
7 #include <cl/ClBackend.hpp>
8 #endif
9 #if defined(ARMCOMPUTENEON_ENABLED)
10 #include <neon/NeonBackend.hpp>
11 #endif
12 #include <reference/RefBackend.hpp>
13 #include <armnn/BackendHelper.hpp>
14 
15 #include <Network.hpp>
16 
17 #include <doctest/doctest.h>
18 
19 #include <vector>
20 #include <string>
21 
22 using namespace armnn;
23 
24 #if defined(ARMCOMPUTENEON_ENABLED) && defined(ARMCOMPUTECL_ENABLED)
25 
26 TEST_SUITE("BackendsCompatibility")
27 {
28 // Partially disabled Test Suite
29 TEST_CASE("Neon_Cl_DirectCompatibility_Test")
30 {
31  auto neonBackend = std::make_unique<NeonBackend>();
32  auto clBackend = std::make_unique<ClBackend>();
33 
35  neonBackend->RegisterTensorHandleFactories(registry);
36  clBackend->RegisterTensorHandleFactories(registry);
37 
38  const BackendId& neonBackendId = neonBackend->GetId();
39  const BackendId& clBackendId = clBackend->GetId();
40 
41  BackendsMap backends;
42  backends[neonBackendId] = std::move(neonBackend);
43  backends[clBackendId] = std::move(clBackend);
44 
45  armnn::Graph graph;
46 
47  armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
48 
49  inputLayer->SetBackendId(neonBackendId);
50 
52  armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
53  softmaxLayer1->SetBackendId(clBackendId);
54 
55  armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
56  softmaxLayer2->SetBackendId(neonBackendId);
57 
58  armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
59  softmaxLayer3->SetBackendId(clBackendId);
60 
61  armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
62  softmaxLayer4->SetBackendId(neonBackendId);
63 
64  armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
65  outputLayer->SetBackendId(clBackendId);
66 
67  inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
68  softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
69  softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
70  softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
71  softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
72 
73  graph.TopologicalSort();
74 
75  std::vector<std::string> errors;
76  auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
77 
78  CHECK(result.m_Error == false);
79  CHECK(result.m_Warning == false);
80 
81  // OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
82  // OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
83  // OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
84  // OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
85  // OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
86 
87  // // Check that the correct factory was selected
88  // CHECK(inputLayerOut.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
89  // CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
90  // CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
91  // CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
92  // CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
93 
94  // // Check that the correct strategy was selected
95  // CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
96  // CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
97  // CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
98  // CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
99  // CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
100 
101  graph.AddCompatibilityLayers(backends, registry);
102 
103  // Test for copy layers
104  int copyCount= 0;
105  graph.ForEachLayer([&copyCount](Layer* layer)
106  {
107  if (layer->GetType() == LayerType::MemCopy)
108  {
109  copyCount++;
110  }
111  });
112  // CHECK(copyCount == 0);
113 
114  // Test for import layers
115  int importCount= 0;
116  graph.ForEachLayer([&importCount](Layer *layer)
117  {
118  if (layer->GetType() == LayerType::MemImport)
119  {
120  importCount++;
121  }
122  });
123  // CHECK(importCount == 0);
124 }
125 
126 }
127 #endif
128 
129 TEST_SUITE("BackendCapability")
130 {
131 
132 namespace
133 {
134 #if defined(ARMNNREF_ENABLED) || defined(ARMCOMPUTENEON_ENABLED) || defined(ARMCOMPUTECL_ENABLED)
135 void CapabilityTestHelper(BackendCapabilities &capabilities,
136  std::vector<std::pair<std::string, bool>> capabilityVector)
137 {
138  for (auto pair : capabilityVector)
139  {
140  CHECK_MESSAGE(armnn::HasCapability(pair.first, capabilities),
141  pair.first << " capability was not been found");
142  CHECK_MESSAGE(armnn::HasCapability(BackendOptions::BackendOption{pair.first, pair.second}, capabilities),
143  pair.first << " capability set incorrectly");
144  }
145 }
146 #endif
147 
148 #if defined(ARMNNREF_ENABLED)
149 
150 TEST_CASE("Ref_Backends_Unknown_Capability_Test")
151 {
152  auto refBackend = std::make_unique<RefBackend>();
153  auto refCapabilities = refBackend->GetCapabilities();
154 
155  armnn::BackendOptions::BackendOption AsyncExecutionFalse{"AsyncExecution", false};
156  CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
157 
158  armnn::BackendOptions::BackendOption AsyncExecutionInt{"AsyncExecution", 50};
159  CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
160 
161  armnn::BackendOptions::BackendOption AsyncExecutionFloat{"AsyncExecution", 0.0f};
162  CHECK(!armnn::HasCapability(AsyncExecutionFloat, refCapabilities));
163 
164  armnn::BackendOptions::BackendOption AsyncExecutionString{"AsyncExecution", "true"};
165  CHECK(!armnn::HasCapability(AsyncExecutionString, refCapabilities));
166 
167  CHECK(!armnn::HasCapability("Telekinesis", refCapabilities));
168  armnn::BackendOptions::BackendOption unknownCapability{"Telekinesis", true};
169  CHECK(!armnn::HasCapability(unknownCapability, refCapabilities));
170 }
171 
172 TEST_CASE ("Ref_Backends_Capability_Test")
173 {
174  auto refBackend = std::make_unique<RefBackend>();
175  auto refCapabilities = refBackend->GetCapabilities();
176 
177  CapabilityTestHelper(refCapabilities,
178  {{"NonConstWeights", true},
179  {"AsyncExecution", true},
180  {"ProtectedContentAllocation", false},
181  {"ConstantTensorsAsInputs", true},
182  {"PreImportIOTensors", true},
183  {"ExternallyManagedMemory", true},
184  {"MultiAxisPacking", false}});
185 }
186 
187 #endif
188 
189 #if defined(ARMCOMPUTENEON_ENABLED)
190 
191 TEST_CASE ("Neon_Backends_Capability_Test")
192 {
193  auto neonBackend = std::make_unique<NeonBackend>();
194  auto neonCapabilities = neonBackend->GetCapabilities();
195 
196  CapabilityTestHelper(neonCapabilities,
197  {{"NonConstWeights", false},
198  {"AsyncExecution", false},
199  {"ProtectedContentAllocation", false},
200  {"ConstantTensorsAsInputs", true},
201  {"PreImportIOTensors", false},
202  {"ExternallyManagedMemory", true},
203  {"MultiAxisPacking", false}});
204 }
205 
206 #endif
207 
208 #if defined(ARMCOMPUTECL_ENABLED)
209 
210 TEST_CASE ("Cl_Backends_Capability_Test")
211 {
212  auto clBackend = std::make_unique<ClBackend>();
213  auto clCapabilities = clBackend->GetCapabilities();
214 
215  CapabilityTestHelper(clCapabilities,
216  {{"NonConstWeights", false},
217  {"AsyncExecution", false},
218  {"ProtectedContentAllocation", true},
219  {"ConstantTensorsAsInputs", true},
220  {"PreImportIOTensors", false},
221  {"ExternallyManagedMemory", true},
222  {"MultiAxisPacking", false}});
223 }
224 
225 #endif
226 }
227 }
TEST_SUITE("TestConstTensorLayerVisitor")
bool HasCapability(const std::string &name, const BackendCapabilities &capabilities)
Convenience function to check if a capability exists in a BackendCapabilities struct.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:425
int Connect(InputSlot &destination)
Definition: Layer.cpp:112
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
Definition: Layer.hpp:276
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:322
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
void ForEachLayer(Func func) const
Definition: Graph.hpp:40
This layer represents a softmax operation.
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
Definition: Layer.hpp:271
Struct for the users to pass backend specific options.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:324
OptimizationResult SelectTensorHandleStrategy(Graph &optGraph, BackendsMap &backends, TensorHandleFactoryRegistry &registry, bool importEnabled, Optional< std::vector< std::string > &> errMessages)
Definition: Network.cpp:1601
Graph & TopologicalSort()
Sorts layers in topological order and return this.
Definition: Graph.hpp:184
A SoftmaxDescriptor for the SoftmaxLayer.
void AddCompatibilityLayers(std::map< BackendId, std::unique_ptr< class IBackendInternal >> &backends, TensorHandleFactoryRegistry &registry)
Modifies the graph in-place, removing edges connecting layers using different compute devices...
Definition: Graph.cpp:303
std::map< BackendId, std::unique_ptr< class IBackendInternal > > BackendsMap
Definition: Network.hpp:294