about summary refs log tree commit diff
path: root/src/armnn/test
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn/test')
-rw-r--r-- src/armnn/test/CreateWorkload.hpp | 2
-rw-r--r-- src/armnn/test/GraphTests.cpp | 30
-rw-r--r-- src/armnn/test/NetworkTests.cpp | 26
3 files changed, 29 insertions, 29 deletions
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 21385d7a99..db1773a0ce 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -32,7 +32,7 @@ std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer, Graph& graph, const
BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()),
"Cannot convert to derived class");
std::string reasonIfUnsupported;
- layer.SetComputeDevice(factory.GetCompute());
+ layer.SetBackendId(factory.GetCompute());
BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported));
return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index b297a74785..e99cb153fc 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -336,7 +336,7 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
// Both layers must have the same compute device.
if (srcLayer && dstLayer)
{
- BOOST_TEST((srcLayer->GetComputeDevice() == dstLayer->GetComputeDevice()));
+ BOOST_TEST((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
}
// Marks edge in original graph as observed (by deleting it).
@@ -418,7 +418,7 @@ static void TestGraphAfterAddingCopyLayers(const armnn::Graph& graph, const armn
}
// Both layers must have different compute devices.
- BOOST_TEST((nonCopyLayer->GetComputeDevice() != adjLayer->GetComputeDevice()));
+ BOOST_TEST((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));
// There must exist an edge connecting both layers directly in the original graph.
{
@@ -453,40 +453,40 @@ struct CopyLayersFixture
using namespace std;
Layer* const inputLayer = AddLayer<InputLayer>(0, "input");
- inputLayer->SetComputeDevice(Compute::CpuRef);
+ inputLayer->SetBackendId(Compute::CpuRef);
Convolution2dDescriptor convolutionDefaults;
Layer* const convLayer1 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv1");
- convLayer1->SetComputeDevice(Compute::CpuRef);
+ convLayer1->SetBackendId(Compute::CpuRef);
inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
Layer* const convLayer2 = AddLayer<Convolution2dLayer>(convolutionDefaults, "conv2");
- convLayer2->SetComputeDevice(Compute::CpuRef);
+ convLayer2->SetBackendId(Compute::CpuRef);
convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
armnn::OriginsDescriptor mergerDefaults(2);
Layer* const mergerLayer = AddLayer<MergerLayer>(mergerDefaults, "merger");
- mergerLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ mergerLayer->SetBackendId(armnn::Compute::CpuRef);
convLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
convLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
armnn::ActivationDescriptor activationDefaults;
Layer* const actLayer = AddLayer<ActivationLayer>(activationDefaults, "act");
- actLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ actLayer->SetBackendId(armnn::Compute::CpuRef);
mergerLayer->GetOutputSlot(0).Connect(actLayer->GetInputSlot(0));
armnn::SoftmaxDescriptor softmaxDefaults;
Layer* const softmaxLayer = AddLayer<SoftmaxLayer>(softmaxDefaults, "softmax");
- softmaxLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ softmaxLayer->SetBackendId(armnn::Compute::CpuRef);
actLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
Layer* const outputLayer = AddLayer<OutputLayer>(0, "output");
- outputLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ outputLayer->SetBackendId(armnn::Compute::CpuRef);
softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
}
@@ -537,17 +537,17 @@ BOOST_AUTO_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames)
armnn::Graph graph;
armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
- inputLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ inputLayer->SetBackendId(armnn::Compute::CpuRef);
armnn::ViewsDescriptor splitterDesc(2);
armnn::SplitterLayer* const splitterLayer = graph.AddLayer<armnn::SplitterLayer>(splitterDesc, "splitter");
- splitterLayer->SetComputeDevice(armnn::Compute::GpuAcc);
+ splitterLayer->SetBackendId(armnn::Compute::GpuAcc);
armnn::AdditionLayer* const additionLayer = graph.AddLayer<armnn::AdditionLayer>("addition");
- additionLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ additionLayer->SetBackendId(armnn::Compute::CpuRef);
armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
- outputLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ outputLayer->SetBackendId(armnn::Compute::CpuRef);
inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
splitterLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
@@ -568,10 +568,10 @@ BOOST_AUTO_TEST_CASE(DuplicateLayerNames)
armnn::Graph graph;
armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "layer");
- inputLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ inputLayer->SetBackendId(armnn::Compute::CpuRef);
armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "layer");
- outputLayer->SetComputeDevice(armnn::Compute::CpuRef);
+ outputLayer->SetBackendId(armnn::Compute::CpuRef);
inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index f1319464fc..3b426fa8ab 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -510,7 +510,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
armnn::NeonWorkloadFactory fact;
for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuAcc, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
BOOST_CHECK_NO_THROW(
layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
}
@@ -541,7 +541,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
armnn::ClWorkloadFactory fact;
for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
{
- BOOST_CHECK_EQUAL(armnn::Compute::GpuAcc, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
BOOST_CHECK_NO_THROW(
layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
}
@@ -609,14 +609,14 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
#if ARMCOMPUTENEON_ENABLED
if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuAcc, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
}
else if (layer->GetType() == armnn::LayerType::Normalization)
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
#else
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
#endif
}
}
@@ -747,7 +747,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
armnn::RefWorkloadFactory fact;
for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
BOOST_CHECK_NO_THROW(
layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
}
@@ -791,23 +791,23 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback
#if ARMCOMPUTENEON_ENABLED
if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuAcc, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
}
else if (layer->GetType() == armnn::LayerType::Normalization)
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
#elif ARMCOMPUTECL_ENABLED
if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
{
- BOOST_CHECK_EQUAL(armnn::Compute::GpuAcc, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
}
else if (layer->GetType() == armnn::LayerType::Normalization)
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
#else
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
#endif
}
}
@@ -841,7 +841,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
}
@@ -874,7 +874,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
{
- BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
}