From f0b4845c1c6f24f59d4c88473b852cf69a3c7ae9 Mon Sep 17 00:00:00 2001
From: David Beck <david.beck@arm.com>
Date: Fri, 19 Oct 2018 15:20:56 +0100
Subject: IVGCVSW-2019 : replace Compute enum in the backend preferences list

Change-Id: Ie7549fd27378acfa97e68d098e338b8c9d4ea5d2
---
 src/armnn/DeviceSpec.hpp            |  2 +-
 src/armnn/Network.cpp               | 14 +++++++-------
 src/armnn/test/EndToEndTest.cpp     | 20 ++++++++++----------
 src/armnn/test/JsonPrinterTests.cpp | 34 +++++++++++++++++-----------------
 src/armnn/test/NetworkTests.cpp     | 26 +++++++++++++-------------
 src/armnn/test/ProfilerTests.cpp    |  2 +-
 src/armnn/test/RuntimeTests.cpp     | 26 +++++++++++++-------------
 7 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/src/armnn/DeviceSpec.hpp b/src/armnn/DeviceSpec.hpp
index 34acbcbdec..af0d8f578b 100644
--- a/src/armnn/DeviceSpec.hpp
+++ b/src/armnn/DeviceSpec.hpp
@@ -22,7 +22,7 @@ public:
         return std::vector<IBackendSharedPtr>();
     }
 
-    std::set<Compute> m_SupportedComputeDevices;
+    std::set<BackendId> m_SupportedComputeDevices;
 };
 
 }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c43f336145..8c70e5d793 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -94,7 +94,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
 }
 
 IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
-                              const std::vector<armnn::Compute>& backendPreferences,
+                              const std::vector<armnn::BackendId>& backendPreferences,
                               const IDeviceSpec& deviceSpec,
                               const OptimizerOptions& options,
                               Optional<std::vector<std::string>&> errMessages)
@@ -133,8 +133,8 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
     // determine which of the preferred backends we have available for use
     // and whether we have specified CpuRef as one of those backends.
     bool cpuRefUsed = false;
-    std::vector<armnn::Compute> availablePreferredBackends;
-    for (const armnn::Compute& backend : backendPreferences)
+    std::vector<BackendId> availablePreferredBackends;
+    for (const auto& backend : backendPreferences)
     {
         // Check if the backend is in the available backend devices.
         if (std::find(spec.m_SupportedComputeDevices.begin(),
@@ -142,7 +142,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                       spec.m_SupportedComputeDevices.end())
         {
            availablePreferredBackends.push_back(backend);
-           if (armnn::Compute::CpuRef == backend) {
+           if (backend == armnn::Compute::CpuRef) {
                cpuRefUsed = true;
            }
        }
@@ -183,7 +183,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                 // which haven't had a scale set and report them all back.
                 bErrorFound = true;
             }
 
-            for (const armnn::Compute& backend : availablePreferredBackends)
+            for (const auto& backend : availablePreferredBackends)
             {
                 // need to set the compute device on the layer
                 // before we can check if it is supported
@@ -205,7 +205,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                     InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer);
 
                     // Assign a supported backend to the newly introduced conversion layers
-                    auto AssignFirstSupportedBackend = [&](Layer* layer, Compute preferredBackend)
+                    auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend)
                     {
                         bool supportedBackendFound = false;
                         std::string reasonIfUnsupported;
@@ -218,7 +218,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
                         }
                         else
                         {
-                            for (const Compute& backend : availablePreferredBackends)
+                            for (const auto& backend : availablePreferredBackends)
                             {
                                 // Skip preferred backend (we already determined that it is not supported)
                                 if (backend == preferredBackend)
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index 98b18411d4..d34bf69548 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(Unsigned8)
     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // optimize the network
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
@@ -110,7 +110,7 @@ BOOST_AUTO_TEST_CASE(Unsigned8)
 }
 
 template <typename T>
-void ConstantUsageTest(const std::vector<armnn::Compute>& computeDevice,
+void ConstantUsageTest(const std::vector<armnn::BackendId>& computeDevice,
                        const armnn::TensorInfo& commonTensorInfo,
                        const std::vector<T>& inputData,
                        const std::vector<T>& constantData,
@@ -165,7 +165,7 @@ void ConstantUsageTest(const std::vector<armnn::Compute>& computeDevice,
     BOOST_TEST(outputData == expectedOutputData);
 }
 
-static void ConstantUsageFloat32Test(const std::vector<armnn::Compute>& computeDevice)
+static void ConstantUsageFloat32Test(const std::vector<armnn::BackendId>& computeDevice)
 {
     const armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::Float32);
 
@@ -177,7 +177,7 @@ static void ConstantUsageFloat32Test(const std::vector<armnn::Compute>& computeD
     );
 }
 
-static void ConstantUsageUint8Test(const std::vector<armnn::Compute>& computeDevice)
+static void ConstantUsageUint8Test(const std::vector<armnn::BackendId>& computeDevice)
 {
     armnn::TensorInfo commonTensorInfo({ 2, 3 }, armnn::DataType::QuantisedAsymm8);
 
@@ -197,7 +197,7 @@ static void ConstantUsageUint8Test(const std::vector<armnn::Compute>& computeDev
 
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
 {
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     ConstantUsageFloat32Test(backends);
 }
 
@@ -217,7 +217,7 @@ BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
 
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
 {
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     ConstantUsageUint8Test(backends);
 }
 
@@ -250,7 +250,7 @@ BOOST_AUTO_TEST_CASE(TrivialAdd)
     add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
@@ -349,7 +349,7 @@ BOOST_AUTO_TEST_CASE(MultipleOutputs)
     activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
@@ -410,7 +410,7 @@ BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // optimize the network
-    std::vector<Compute> backends = {Compute::CpuAcc, Compute::CpuRef};
+    std::vector<BackendId> backends = {Compute::CpuAcc, Compute::CpuRef};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Load it into the runtime. It should pass.
@@ -446,7 +446,7 @@ BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
     pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // optimize the network
-    std::vector<Compute> backends = {Compute::CpuAcc};
+    std::vector<BackendId> backends = {Compute::CpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(!optNet);
 }
diff --git a/src/armnn/test/JsonPrinterTests.cpp b/src/armnn/test/JsonPrinterTests.cpp
index 44609ea6f9..93f32cc540 100644
--- a/src/armnn/test/JsonPrinterTests.cpp
+++ b/src/armnn/test/JsonPrinterTests.cpp
@@ -117,7 +117,7 @@ std::vector<std::string> ExtractSections(const std::string& exp)
     return sections;
 }
 
-std::string SoftmaxProfilerTestSetupHelper(const std::vector<armnn::Compute>& backends)
+std::string SoftmaxProfilerTestSetupHelper(const std::vector<armnn::BackendId>& backends)
 {
     using namespace armnn;
 
@@ -239,7 +239,7 @@ void SoftmaxProfilerTestValidationHelper(std::string& result, const std::string&
 }
 
 void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJSONPrinterResult(
-    const std::vector<armnn::Compute>& backends)
+    const std::vector<armnn::BackendId>& backends)
 {
     // setup the test fixture and obtain JSON Printer result
     std::string result = SoftmaxProfilerTestSetupHelper(backends);
@@ -250,10 +250,10 @@ void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJSONPrinterResult(
     std::string changeLine40;
     std::string changeLine45;
 
-    switch(backends[0]) {
-        case armnn::Compute::GpuAcc: backend = "Cl";
-            changeLine31 = ",\n\"OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]\": {";
-            changeLine39 = R"(us"
+    if (backends[0] == armnn::Compute::GpuAcc) {
+        backend = "Cl";
+        changeLine31 = ",\n\"OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]\": {";
+        changeLine39 = R"(us"
 },
 "OpenClKernelTimer/: softmax_layer_norm_quantized GWS[,,]": {
 "raw": [
 ,
 ,
 
@@ -263,7 +263,7 @@ void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJSONPrinterResult(
 ],
 "unit": "us")";
 
-            changeLine40 = R"(
+        changeLine40 = R"(
 },
 "CopyMemGeneric_Execute": {
 "raw": [
 ,
 ,
 
@@ -272,11 +272,13 @@ void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJSONPrinterResult(
 ],
 "unit": "us")";
 
-            changeLine45 = "}\n";
-            break;
-        case armnn::Compute::CpuAcc: backend = "Neon";
-            changeLine31 = ",\n\"NeonKernelTimer/: NEFillBorderKernel\": {";
-            changeLine39 = R"(us"
+        changeLine45 = "}\n";
+    }
+    else if (backends[0] == armnn::Compute::CpuAcc)
+    {
+        backend = "Neon";
+        changeLine31 = ",\n\"NeonKernelTimer/: NEFillBorderKernel\": {";
+        changeLine39 = R"(us"
 },
 "NeonKernelTimer/: NELogitsDMaxKernel": {
 "raw": [
 ,
 ,
 
@@ -293,7 +295,7 @@ void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJSONPrinterResult(
 ],
 "unit": "us")";
 
-            changeLine40 = R"(
+        changeLine40 = R"(
 },
 "CopyMemGeneric_Execute": {
 "raw": [
 ,
 ,
@@ -302,11 +304,9 @@ void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJSONPrinterResult(
 ],
 "unit": "us")";
 
-            changeLine45 = "}\n";
-            break;
-        default:
-            break;
+        changeLine45 = "}\n";
+    }
 
     std::string testData = R"({
 "ArmNN": {
 "inference_measurements": {
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 3b426fa8ab..4f8dd7ea7b 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -65,7 +65,7 @@ BOOST_AUTO_TEST_CASE(SerializeToDot)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
 
     std::ostringstream ss;
@@ -472,7 +472,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
     static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers();
     BOOST_CHECK(optNet);
@@ -503,7 +503,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
     // validate workloads
@@ -534,7 +534,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
     // validate workloads
@@ -570,7 +570,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(!optNet);
 }
@@ -597,7 +597,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_REQUIRE(optNet);
 
@@ -676,7 +676,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::Undefined };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(!optNet);
 
@@ -738,7 +738,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
 
@@ -774,7 +774,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc,
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
                                              armnn::Compute::GpuAcc,
                                              armnn::Compute::CpuRef };
 
@@ -818,7 +818,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -851,7 +851,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -909,7 +909,7 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnCpuRef)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
 
     armnn::OptimizerOptions optimizerOptions;
     optimizerOptions.m_ReduceFp32ToFp16 = true;
@@ -976,7 +976,7 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
 
     armnn::OptimizerOptions optimizerOptions;
     optimizerOptions.m_ReduceFp32ToFp16 = true;
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index 8d7684459a..650c13a4f4 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -163,7 +163,7 @@ BOOST_AUTO_TEST_CASE(RuntimeLoadNetwork)
     armnn::NetworkId networkIdentifier = 1;
     armnn::INetworkPtr mockNetwork(armnn::INetwork::Create());
     mockNetwork->AddInputLayer(0, "test layer");
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     runtime->LoadNetwork(networkIdentifier, armnn::Optimize(*mockNetwork, backends, runtime->GetDeviceSpec()));
 
     // Check that now there's a profiler registered for this thread (created and registered by the loading the network).
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 0237387b93..76f5774a49 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork)
     armnn::NetworkId networkIdentifier1 = 1;
     armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
     mockNetwork1->AddInputLayer(0, "test layer");
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     runtime->LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime->GetDeviceSpec()));
 
     // Mock network 2.
@@ -71,7 +71,7 @@ struct DisableGlobalLeakChecking
 
 BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);
 
-void CreateAndDropDummyNetwork(const std::vector<armnn::Compute>& backends, armnn::Runtime& runtime)
+void CreateAndDropDummyNetwork(const std::vector<armnn::BackendId>& backends, armnn::Runtime& runtime)
 {
     armnn::NetworkId networkIdentifier;
     {
@@ -129,7 +129,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
     armnn::Runtime runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     {
         // Do a warmup of this so we make sure that all one-time
         // initialization happens before we do the leak checking.
@@ -158,7 +158,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
     armnn::Runtime runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
     {
         // Do a warmup of this so we make sure that all one-time
         // initialization happens before we do the leak checking.
@@ -187,7 +187,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef)
     armnn::Runtime runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     {
         // Do a warmup of this so we make sure that all one-time
         // initialization happens before we do the leak checking.
@@ -243,7 +243,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
     VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);
 
     // build a mock-network and load it into the runtime
-    std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     {
         armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
         armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
@@ -331,7 +331,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
 
         mockNetwork1->AddInputLayer(0, "test layer");
 
-        std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
         runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime.GetDeviceSpec()));
     }
 
@@ -379,7 +379,7 @@ BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
 
@@ -406,7 +406,7 @@ BOOST_AUTO_TEST_CASE(RuntimeValidateGpuDeviceSupportLayerNoFallback)
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
-    std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
 
@@ -442,7 +442,7 @@ BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
     normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // optimize the network
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
     // Load it into the runtime. It should success.
@@ -476,7 +476,7 @@ BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
     normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
 
     // Allow fallback to CpuRef.
-    std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
     // optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
@@ -524,7 +524,7 @@ BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
         armnn::DataType::QuantisedAsymm8
     ));
 
-    std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     std::vector<std::string> errMessages;
     armnn::IOptimizedNetworkPtr optNet = Optimize(
         *net,
@@ -533,7 +533,7 @@ BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
         OptimizerOptions(),
         errMessages
     );
-    
+
     BOOST_TEST(errMessages.size() == 1);
     BOOST_TEST(errMessages[0] == "ERROR: output 0 of layer Softmax (softmax) is of type "
-- 
cgit v1.2.1
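
A note on the calling convention this patch establishes: Optimize and the test helpers now take the backend preference list as std::vector<BackendId> rather than std::vector<Compute>. BackendId is constructible from the Compute enum, which is why initializers such as {armnn::Compute::CpuRef} throughout the tests keep compiling unchanged; it also accepts a backend-name string, so the list is no longer tied to the fixed enum. Below is a minimal sketch of a caller under the new signature (not part of the patch; it assumes the ArmNN API of this era, uses a trivial one-input network like the mock networks in the tests above, and elides error handling):

// Sketch only: demonstrates passing a std::vector<BackendId> to Optimize.
#include <armnn/ArmNN.hpp>
#include <armnn/BackendId.hpp>

#include <vector>

int main()
{
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);

    // Trivial one-layer network, mirroring the mock networks in the tests.
    armnn::INetworkPtr net = armnn::INetwork::Create();
    net->AddInputLayer(0, "test layer");

    // The preference list is now a vector of BackendId. The first entry is
    // built implicitly from the Compute enum (old call sites keep working);
    // the second is built from a backend-name string.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
                                               armnn::BackendId("CpuRef") };

    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*net, backends, runtime->GetDeviceSpec());

    return optNet ? 0 : 1;
}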