From f0b4845c1c6f24f59d4c88473b852cf69a3c7ae9 Mon Sep 17 00:00:00 2001 From: David Beck Date: Fri, 19 Oct 2018 15:20:56 +0100 Subject: IVGCVSW-2019 : replace Compute enum in the backend preferences list Change-Id: Ie7549fd27378acfa97e68d098e338b8c9d4ea5d2 --- src/armnn/test/EndToEndTest.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'src/armnn/test/EndToEndTest.cpp') diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp index 98b18411d4..d34bf69548 100644 --- a/src/armnn/test/EndToEndTest.cpp +++ b/src/armnn/test/EndToEndTest.cpp @@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(Unsigned8) softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); // optimize the network - std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef}; + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); // Loads it into the runtime. @@ -110,7 +110,7 @@ BOOST_AUTO_TEST_CASE(Unsigned8) } template<typename T> -void ConstantUsageTest(const std::vector<armnn::Compute>& computeDevice, +void ConstantUsageTest(const std::vector<armnn::BackendId>& computeDevice, const armnn::TensorInfo& commonTensorInfo, const std::vector<T>& inputData, const std::vector<T>& constantData, @@ -165,7 +165,7 @@ void ConstantUsageTest(const std::vector<armnn::Compute>& computeDevice, BOOST_TEST(outputData == expectedOutputData); } -static void ConstantUsageFloat32Test(const std::vector<armnn::Compute>& computeDevice) +static void ConstantUsageFloat32Test(const std::vector<armnn::BackendId>& computeDevice) { const armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::Float32); @@ -177,7 +177,7 @@ static void ConstantUsageFloat32Test(const std::vector<armnn::Compute>& computeDevice) ); } -static void ConstantUsageUint8Test(const std::vector<armnn::Compute>& computeDevice) +static void ConstantUsageUint8Test(const std::vector<armnn::BackendId>& computeDevice) { armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::QuantisedAsymm8); @@ -197,7 +197,7 @@ static void ConstantUsageUint8Test(const std::vector<armnn::Compute>& computeDevice) 
BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32) { - std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef}; + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; ConstantUsageFloat32Test(backends); } @@ -217,7 +217,7 @@ BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32) BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8) { - std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef}; + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; ConstantUsageUint8Test(backends); } @@ -250,7 +250,7 @@ BOOST_AUTO_TEST_CASE(TrivialAdd) add->GetOutputSlot(0).SetTensorInfo(tensorInfo); // optimize the network - std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef}; + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); // Loads it into the runtime. @@ -349,7 +349,7 @@ BOOST_AUTO_TEST_CASE(MultipleOutputs) activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo); // optimize the network - std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef}; + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef}; IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); // Loads it into the runtime. @@ -410,7 +410,7 @@ BOOST_AUTO_TEST_CASE(FallbackToCpuRef) pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32)); // optimize the network - std::vector<Compute> backends = {Compute::CpuAcc, Compute::CpuRef}; + std::vector<BackendId> backends = {Compute::CpuAcc, Compute::CpuRef}; IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); // Load it into the runtime. It should pass. @@ -446,7 +446,7 @@ BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork) pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32)); // optimize the network - std::vector<Compute> backends = {Compute::CpuAcc}; + std::vector<BackendId> backends = {Compute::CpuAcc}; IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); BOOST_CHECK(!optNet); } -- cgit v1.2.1