author     Finn Williams <Finn.Williams@arm.com>    2021-06-15 15:42:17 +0100
committer  finn.williams <finn.williams@arm.com>    2021-06-21 08:40:34 +0000
commit     50f7b327e1871596355bbfed8aae5d4f7b2caae7 (patch)
tree       5b020de6a300664174f7c0353c20ca3d22382d58
parent     87b529679439d45fb8d1bad52f7d2037f3efb654 (diff)
IVGCVSW-6099 Fix BackendsCompatibility tests with only Neon or CL enabled
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I70492e34351f72ac700b24e40e1f7572f66a76f2
-rw-r--r--  src/backends/backendsCommon/test/CompatibilityTests.cpp | 204
1 file changed, 103 insertions(+), 101 deletions(-)
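
Note: the gist of the change below is that the "BackendsCompatibility" suite exercises both the Neon and the CL backend, so it is now compiled only when both ARMCOMPUTENEON_ENABLED and ARMCOMPUTECL_ENABLED are defined, rather than whenever Neon alone is enabled. A minimal sketch of the guard pattern, assuming the doctest macros and header used elsewhere in this file (the test body is elided):

    #include <doctest/doctest.h>

    // Build the cross-backend tests only when both backends are available;
    // otherwise builds with only Neon or only CL would fail to compile them.
    #if defined(ARMCOMPUTENEON_ENABLED) && defined(ARMCOMPUTECL_ENABLED)
    TEST_SUITE("BackendsCompatibility")
    {
    TEST_CASE("Neon_Cl_DirectCompatibility_Test")
    {
        // ... construct a graph that spans NeonBackend and ClBackend ...
    }
    }
    #endif
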
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 4abab2724a..12cb5e9956 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -17,107 +17,109 @@
using namespace armnn;
-#if defined(ARMCOMPUTENEON_ENABLED)
-// Disabled Test Suite
-//TEST_SUITE("BackendsCompatibility")
-//TEST_CASE("Neon_Cl_DirectCompatibility_Test")
-//{
-// auto neonBackend = std::make_unique<NeonBackend>();
-// auto clBackend = std::make_unique<ClBackend>();
-//
-// TensorHandleFactoryRegistry registry;
-// neonBackend->RegisterTensorHandleFactories(registry);
-// clBackend->RegisterTensorHandleFactories(registry);
-//
-// const BackendId& neonBackendId = neonBackend->GetId();
-// const BackendId& clBackendId = clBackend->GetId();
-//
-// BackendsMap backends;
-// backends[neonBackendId] = std::move(neonBackend);
-// backends[clBackendId] = std::move(clBackend);
-//
-// armnn::Graph graph;
-//
-// armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
-//
-// inputLayer->SetBackendId(neonBackendId);
-//
-// armnn::SoftmaxDescriptor smDesc;
-// armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
-// softmaxLayer1->SetBackendId(clBackendId);
-//
-// armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
-// softmaxLayer2->SetBackendId(neonBackendId);
-//
-// armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
-// softmaxLayer3->SetBackendId(clBackendId);
-//
-// armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
-// softmaxLayer4->SetBackendId(neonBackendId);
-//
-// armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
-// outputLayer->SetBackendId(clBackendId);
-//
-// inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
-// softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
-// softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
-// softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
-// softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-//
-// graph.TopologicalSort();
-//
-// std::vector<std::string> errors;
-// auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
-//
-// CHECK(result.m_Error == false);
-// CHECK(result.m_Warning == false);
-//
-// OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
-// OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
-// OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
-// OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
-// OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
-//
-// // Check that the correct factory was selected
-// CHECK(inputLayerOut.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-// CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-// CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-// CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-// CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-//
-// // Check that the correct strategy was selected
-// CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-// CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-// CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-// CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-// CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-//
-// graph.AddCompatibilityLayers(backends, registry);
-//
-// // Test for copy layers
-// int copyCount= 0;
-// graph.ForEachLayer([&copyCount](Layer* layer)
-// {
-// if (layer->GetType() == LayerType::MemCopy)
-// {
-// copyCount++;
-// }
-// });
-// CHECK(copyCount == 0);
-//
-// // Test for import layers
-// int importCount= 0;
-// graph.ForEachLayer([&importCount](Layer *layer)
-// {
-// if (layer->GetType() == LayerType::MemImport)
-// {
-// importCount++;
-// }
-// });
-// CHECK(importCount == 0);
-//}
-//
-//}
+#if defined(ARMCOMPUTENEON_ENABLED) && defined(ARMCOMPUTECL_ENABLED)
+
+TEST_SUITE("BackendsCompatibility")
+{
+// Partially disabled Test Suite
+TEST_CASE("Neon_Cl_DirectCompatibility_Test")
+{
+ auto neonBackend = std::make_unique<NeonBackend>();
+ auto clBackend = std::make_unique<ClBackend>();
+
+ TensorHandleFactoryRegistry registry;
+ neonBackend->RegisterTensorHandleFactories(registry);
+ clBackend->RegisterTensorHandleFactories(registry);
+
+ const BackendId& neonBackendId = neonBackend->GetId();
+ const BackendId& clBackendId = clBackend->GetId();
+
+ BackendsMap backends;
+ backends[neonBackendId] = std::move(neonBackend);
+ backends[clBackendId] = std::move(clBackend);
+
+ armnn::Graph graph;
+
+ armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
+
+ inputLayer->SetBackendId(neonBackendId);
+
+ armnn::SoftmaxDescriptor smDesc;
+ armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
+ softmaxLayer1->SetBackendId(clBackendId);
+
+ armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
+ softmaxLayer2->SetBackendId(neonBackendId);
+
+ armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
+ softmaxLayer3->SetBackendId(clBackendId);
+
+ armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
+ softmaxLayer4->SetBackendId(neonBackendId);
+
+ armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
+ outputLayer->SetBackendId(clBackendId);
+
+ inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
+ softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
+ softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
+ softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
+ softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ graph.TopologicalSort();
+
+ std::vector<std::string> errors;
+ auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+
+ CHECK(result.m_Error == false);
+ CHECK(result.m_Warning == false);
+
+ // OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
+ // OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
+ // OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
+ // OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
+ // OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
+
+ // // Check that the correct factory was selected
+ // CHECK(inputLayerOut.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+ // CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+ // CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+ // CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+ // CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+
+ // // Check that the correct strategy was selected
+ // CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+ // CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+ // CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+ // CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+ // CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+
+ graph.AddCompatibilityLayers(backends, registry);
+
+ // Test for copy layers
+ int copyCount= 0;
+ graph.ForEachLayer([&copyCount](Layer* layer)
+ {
+ if (layer->GetType() == LayerType::MemCopy)
+ {
+ copyCount++;
+ }
+ });
+ // CHECK(copyCount == 0);
+
+ // Test for import layers
+ int importCount= 0;
+ graph.ForEachLayer([&importCount](Layer *layer)
+ {
+ if (layer->GetType() == LayerType::MemImport)
+ {
+ importCount++;
+ }
+ });
+ // CHECK(importCount == 0);
+}
+
+}
#endif
TEST_SUITE("BackendCapability")