diff options
author | David Beck <david.beck@arm.com> | 2018-10-19 15:20:56 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-22 16:57:54 +0100 |
commit | f0b4845c1c6f24f59d4c88473b852cf69a3c7ae9 (patch) | |
tree | 5a8726ee4a397c421a6a41d6edca1a2d3183f168 /src/armnn/Network.cpp | |
parent | 7bc8c9fc9726d3c9ac002138c594688a006faac6 (diff) | |
download | armnn-f0b4845c1c6f24f59d4c88473b852cf69a3c7ae9.tar.gz |
IVGCVSW-2019 : replace Compute enum in the backend preferences list
Change-Id: Ie7549fd27378acfa97e68d098e338b8c9d4ea5d2
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r-- | src/armnn/Network.cpp | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index c43f336145..8c70e5d793 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -94,7 +94,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string } IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, - const std::vector<armnn::Compute>& backendPreferences, + const std::vector<BackendId>& backendPreferences, const IDeviceSpec& deviceSpec, const OptimizerOptions& options, Optional<std::vector<std::string>&> errMessages) @@ -133,8 +133,8 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, // determine which of the preferred backends we have available for use // and whether we have specified CpuRef as one of those backends. bool cpuRefUsed = false; - std::vector<armnn::Compute> availablePreferredBackends; - for (const armnn::Compute& backend : backendPreferences) + std::vector<BackendId> availablePreferredBackends; + for (const auto& backend : backendPreferences) { // Check if the backend is in the available backend devices. if (std::find(spec.m_SupportedComputeDevices.begin(), @@ -142,7 +142,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, spec.m_SupportedComputeDevices.end()) { availablePreferredBackends.push_back(backend); - if (armnn::Compute::CpuRef == backend) { + if (backend == armnn::Compute::CpuRef) { cpuRefUsed = true; } } @@ -183,7 +183,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, // which haven't had a scale set and report them all back. 
bErrorFound = true; } - for (const armnn::Compute& backend : availablePreferredBackends) + for (const auto& backend : availablePreferredBackends) { // need to set the compute device on the layer // before we can check if it is supported @@ -205,7 +205,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, InsertConvertFp32ToFp16LayersAfter(optNetObjPtr->GetGraph(), *layer); // Assign a supported backend to the newly introduced conversion layers - auto AssignFirstSupportedBackend = [&](Layer* layer, Compute preferredBackend) + auto AssignFirstSupportedBackend = [&](Layer* layer, BackendId preferredBackend) { bool supportedBackendFound = false; std::string reasonIfUnsupported; @@ -218,7 +218,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, } else { - for (const Compute& backend : availablePreferredBackends) + for (const auto& backend : availablePreferredBackends) { // Skip preferred backend (we already determined that it is not supported) if (backend == preferredBackend) |