From a3f4fbaf9ce6e30b3d1337bdfbb47b7301f97d1d Mon Sep 17 00:00:00 2001 From: Cathal Corbett <cathal.corbett@arm.com> Date: Mon, 21 Mar 2022 09:27:08 +0000 Subject: IVGCVSW-6732 Tests surrounded in '#if defined(ARMNNREF_ENABLED)' in android-nn-driver do not execute. * Change to src/backends/cl/workloads/ClLstmFloatWorkload.cpp fix LstmTests_GpuAcc tests. * Change to src/backends/cl/workloads/ClConvertFp16ToFp32Workload.hpp & ClConvertFp32ToFp16Workload.hpp fix MeanTests_GpuAcc and Convolution2DTests_1.1 tests. * Added UnitTests to src/backends/cl/test/ClImportTensorHandleTests.cpp to test import on Convert Layers. !android-nn-driver:7264 Signed-off-by: Cathal Corbett <cathal.corbett@arm.com> Change-Id: I0c46dc4b9c54eca8771ab12ed0302b6224606957 --- src/armnn/LoadedNetwork.cpp | 2 +- src/armnn/Network.cpp | 32 ++++++++++++++++++++++++++++---- src/armnn/Network.hpp | 4 ++++ 3 files changed, 33 insertions(+), 5 deletions(-) (limited to 'src/armnn') diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index 6d08fc321e..0e577354f0 100644 --- a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -1456,7 +1456,7 @@ std::vector<ImportedOutputId> LoadedNetwork::ImportOutputs(const OutputTensors& { throw MemoryImportException("ImportOutputs: Force Import failed, incorrect number of tensors"); } - std::vector<ImportedInputId> importedOutputs; + std::vector<ImportedOutputId> importedOutputs; Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort(); unsigned int outputIndex = 0; diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 339da0d1b8..a3655509fb 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -1658,7 +1658,7 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph, return result; } -IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, +IOptimizedNetworkPtr Optimize(const Graph& inGraph, const std::vector<BackendId>& backendPreferences, const IDeviceSpec& deviceSpec, const OptimizerOptions& options, @@ -1667,7 +1667,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, 
ARMNN_LOG(debug) << options.ToString(); // Enable profiling - auto profiler = inNetwork.pNetworkImpl->GetGraph().GetProfiler(); + auto profiler = inGraph.GetProfiler(); ProfilerManager::GetInstance().RegisterProfiler(profiler.get()); profiler->EnableProfiling(options.m_ProfilingEnabled); @@ -1683,9 +1683,9 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, } // Ensure TensorInfo is set on all output slots of ConstantLayers in the graph - inNetwork.pNetworkImpl->GetGraph().VerifyConstantLayerSetTensorInfo(); + inGraph.VerifyConstantLayerSetTensorInfo(); - std::unique_ptr<Graph> graph = std::make_unique<Graph>(inNetwork.pNetworkImpl->GetGraph()); + std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph); auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions), &IOptimizedNetwork::Destroy); @@ -1827,6 +1827,20 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, } return optNet; } + +IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, + const std::vector<BackendId>& backendPreferences, + const IDeviceSpec& deviceSpec, + const OptimizerOptions& options, + Optional<std::vector<std::string>&> messages) +{ + return Optimize(inNetwork.pNetworkImpl->GetGraph(), + backendPreferences, + deviceSpec, + options, + messages); +} + bool NetworkImpl::GetShapeInferenceMethod() { if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod") @@ -2000,6 +2014,16 @@ IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDes return layer; } +IConnectableLayer* NetworkImpl::AddConvertFp16ToFp32Layer(const char* name) +{ + return m_Graph->AddLayer<ConvertFp16ToFp32Layer>(name); +} + +IConnectableLayer* NetworkImpl::AddConvertFp32ToFp16Layer(const char* name) +{ + return m_Graph->AddLayer<ConvertFp32ToFp16Layer>(name); +} + IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor, const ConstTensor& weights, const Optional<ConstTensor>& biases, diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 
1d88f267d1..fffad86b80 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -238,6 +238,10 @@ public: const LstmInputParams& params, const char* name = nullptr); + IConnectableLayer* AddConvertFp16ToFp32Layer(const char* name = nullptr); + + IConnectableLayer* AddConvertFp32ToFp16Layer(const char* name = nullptr); + ARMNN_NO_DEPRECATE_WARN_BEGIN void Accept(ILayerVisitor& visitor) const; ARMNN_NO_DEPRECATE_WARN_END -- cgit v1.2.1