diff options
author:    Sadik Armagan <sadik.armagan@arm.com>  2020-03-18 10:57:30 +0000
committer: Sadik Armagan <sadik.armagan@arm.com>  2020-03-18 15:48:21 +0000
commit:    3184c907b2420e6c66485529f336251b2b62aecf (patch)
tree:      b79190007f80da9bb9d827efb714ce13a0ffb2bb /src/armnn
parent:    e6a2ccd09060ba93203ddc5a7f79260cedf2c147 (diff)
download:  armnn-3184c907b2420e6c66485529f336251b2b62aecf.tar.gz
IVGCVSW-4463 Change ProfilingService to a member of runtime from a singleton
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I345c39a10a4693a500aa1687d9a5cee76da791c3
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/BackendRegistry.cpp    |  9
-rw-r--r--  src/armnn/Layer.cpp              |  2
-rw-r--r--  src/armnn/LoadedNetwork.cpp      | 30
-rw-r--r--  src/armnn/LoadedNetwork.hpp      | 10
-rw-r--r--  src/armnn/Network.cpp            |  6
-rw-r--r--  src/armnn/Network.hpp            |  3
-rw-r--r--  src/armnn/Runtime.cpp            | 20
-rw-r--r--  src/armnn/Runtime.hpp            |  7
-rw-r--r--  src/armnn/test/RuntimeTests.cpp  | 27
9 files changed, 62 insertions, 52 deletions
diff --git a/src/armnn/BackendRegistry.cpp b/src/armnn/BackendRegistry.cpp index 35e82f2e67..a79cdd0bb2 100644 --- a/src/armnn/BackendRegistry.cpp +++ b/src/armnn/BackendRegistry.cpp @@ -5,7 +5,6 @@ #include <armnn/BackendRegistry.hpp> #include <armnn/Exceptions.hpp> -#include <ProfilingService.hpp> namespace armnn { @@ -24,19 +23,11 @@ void BackendRegistry::Register(const BackendId& id, BackendRegistry::FactoryFunc std::string(id) + " already registered as IBackend factory", CHECK_LOCATION()); } - if (profiling::ProfilingService::Instance().IsProfilingEnabled()) - { - profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::REGISTERED_BACKENDS); - } m_Factories[id] = factory; } void BackendRegistry::Deregister(const BackendId& id) { - if (profiling::ProfilingService::Instance().IsProfilingEnabled()) - { - profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::UNREGISTERED_BACKENDS); - } m_Factories.erase(id); } diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp index 9de812c6e5..29d85b5a4c 100644 --- a/src/armnn/Layer.cpp +++ b/src/armnn/Layer.cpp @@ -194,7 +194,7 @@ Layer::Layer(unsigned int numInputSlots, , m_Type(type) , m_BackendId() , m_BackendHint(EmptyOptional()) -, m_Guid(profiling::ProfilingService::Instance().NextGuid()) +, m_Guid(profiling::ProfilingService::GetNextGuid()) { IgnoreUnused(layout); m_InputSlots.reserve(numInputSlots); diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp index 69e42ba38f..f3d742c515 100644 --- a/src/armnn/LoadedNetwork.cpp +++ b/src/armnn/LoadedNetwork.cpp @@ -20,7 +20,6 @@ #include <backendsCommon/MemSyncWorkload.hpp> #include <LabelsAndEventClasses.hpp> -#include <ProfilingService.hpp> #include <boost/polymorphic_cast.hpp> #include <boost/assert.hpp> @@ -84,7 +83,8 @@ void AddWorkloadStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net, 
std::string& errorMessage, - const INetworkProperties& networkProperties) + const INetworkProperties& networkProperties, + profiling::ProfilingService& profilingService) { std::unique_ptr<LoadedNetwork> loadedNetwork; @@ -98,7 +98,7 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr< try { - loadedNetwork.reset(new LoadedNetwork(std::move(net), networkProperties)); + loadedNetwork.reset(new LoadedNetwork(std::move(net), networkProperties, profilingService)); } catch (const armnn::RuntimeException& error) { @@ -117,10 +117,12 @@ std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr< } LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, - const INetworkProperties& networkProperties) : + const INetworkProperties& networkProperties, + profiling::ProfilingService& profilingService) : m_OptimizedNetwork(std::move(net)), m_IsImportEnabled(networkProperties.m_ImportEnabled), - m_IsExportEnabled(networkProperties.m_ExportEnabled) + m_IsExportEnabled(networkProperties.m_ExportEnabled), + m_ProfilingService(profilingService) { // Create a profiler and register it for the current thread. 
m_Profiler = std::make_shared<Profiler>(); @@ -191,7 +193,8 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, } ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid(); - std::unique_ptr<TimelineUtilityMethods> timelineUtils = TimelineUtilityMethods::GetTimelineUtils(); + std::unique_ptr<TimelineUtilityMethods> timelineUtils = + TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService); if (timelineUtils) { timelineUtils->CreateTypedEntity(networkGuid, LabelsAndEventClasses::NETWORK_GUID); @@ -449,8 +452,9 @@ Status LoadedNetwork::EnqueueWorkload(const InputTensors& inputTensors, EnqueueOutput(*outputLayer, pin.GetTensorHandle(), pin.GetTensorInfo()); } - std::unique_ptr<TimelineUtilityMethods> timelineUtils = TimelineUtilityMethods::GetTimelineUtils(); - ProfilingGuid inferenceGuid = ProfilingService::Instance().NextGuid(); + std::unique_ptr<TimelineUtilityMethods> timelineUtils = + TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService); + ProfilingGuid inferenceGuid = m_ProfilingService.GetNextGuid(); if (timelineUtils) { // Add inference timeline trace if profiling is enabled. 
@@ -463,9 +467,9 @@ Status LoadedNetwork::EnqueueWorkload(const InputTensors& inputTensors, bool executionSucceeded = true; { - if (profiling::ProfilingService::Instance().IsProfilingEnabled()) + if (m_ProfilingService.IsProfilingEnabled()) { - profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::INFERENCES_RUN); + m_ProfilingService.IncrementCounterValue(armnn::profiling::INFERENCES_RUN); } ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Execute"); ARMNN_SCOPED_HEAP_PROFILING("Executing"); @@ -535,7 +539,8 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tens BOOST_ASSERT_MSG(inputWorkload, "No input workload created"); - std::unique_ptr<TimelineUtilityMethods> timelineUtils = TimelineUtilityMethods::GetTimelineUtils(); + std::unique_ptr<TimelineUtilityMethods> timelineUtils = + TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService); if (timelineUtils) { // Add Input Workload to the post-optimisation network structure @@ -627,7 +632,8 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor, info); BOOST_ASSERT_MSG(outputWorkload, "No output workload created"); - std::unique_ptr<TimelineUtilityMethods> timelineUtils = TimelineUtilityMethods::GetTimelineUtils(); + std::unique_ptr<TimelineUtilityMethods> timelineUtils = + TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService); if (timelineUtils) { // Add Output Workload to the post-optimisation network structure diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp index ab2c8be1cd..01e3442508 100644 --- a/src/armnn/LoadedNetwork.hpp +++ b/src/armnn/LoadedNetwork.hpp @@ -15,6 +15,7 @@ #include <backendsCommon/TensorHandleFactoryRegistry.hpp> #include <backendsCommon/Workload.hpp> #include <backendsCommon/WorkloadFactory.hpp> +#include <ProfilingService.hpp> #include <TimelineUtilityMethods.hpp> #include <mutex> @@ -43,7 +44,8 @@ public: 
static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net, std::string & errorMessage, - const INetworkProperties& networkProperties); + const INetworkProperties& networkProperties, + profiling::ProfilingService& profilingService); // NOTE we return by reference as the purpose of this method is only to provide // access to the private m_Profiler and in theory we should not need to increment @@ -57,7 +59,9 @@ public: private: void AllocateWorkingMemory(); - LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, const INetworkProperties& networkProperties); + LoadedNetwork(std::unique_ptr<OptimizedNetwork> net, + const INetworkProperties& networkProperties, + profiling::ProfilingService& profilingService); void EnqueueInput(const BindableLayer& layer, ITensorHandle* tensorHandle, const TensorInfo& tensorInfo); @@ -92,6 +96,8 @@ private: bool m_IsExportEnabled=false; TensorHandleFactoryRegistry m_TensorHandleFactoryRegistry; + + profiling::ProfilingService& m_ProfilingService; }; } diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 3663727e48..9eef7b2fb6 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -1023,8 +1023,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, } Network::Network() -: m_Graph(std::make_unique<Graph>()), - m_Guid(profiling::ProfilingService::Instance().NextGuid()) +: m_Graph(std::make_unique<Graph>()) { } @@ -1680,8 +1679,7 @@ void Network::Accept(ILayerVisitor& visitor) const } OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph) - : m_Graph(std::move(graph)), - m_Guid(profiling::ProfilingService::Instance().NextGuid()) + : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()) { } diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 089b46c9ca..17eacba48a 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -35,8 +35,6 @@ public: Status PrintGraph() override; - profiling::ProfilingGuid GetGuid() const final { 
return m_Guid; }; - IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override; IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc, @@ -259,7 +257,6 @@ private: const char* name); std::unique_ptr<Graph> m_Graph; - profiling::ProfilingGuid m_Guid; }; class OptimizedNetwork final : public IOptimizedNetwork diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp index b1017c58ed..26636a81f7 100644 --- a/src/armnn/Runtime.cpp +++ b/src/armnn/Runtime.cpp @@ -11,8 +11,6 @@ #include <armnn/backends/IBackendContext.hpp> #include <backendsCommon/DynamicBackendUtils.hpp> -#include <ProfilingService.hpp> - #include <iostream> #include <boost/polymorphic_cast.hpp> @@ -75,7 +73,8 @@ Status Runtime::LoadNetwork(NetworkId& networkIdOut, unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork( std::unique_ptr<OptimizedNetwork>(boost::polymorphic_downcast<OptimizedNetwork*>(rawNetwork)), errorMessage, - networkProperties); + networkProperties, + m_ProfilingService); if (!loadedNetwork) { @@ -94,9 +93,9 @@ Status Runtime::LoadNetwork(NetworkId& networkIdOut, context.second->AfterLoadNetwork(networkIdOut); } - if (profiling::ProfilingService::Instance().IsProfilingEnabled()) + if (m_ProfilingService.IsProfilingEnabled()) { - profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::NETWORK_LOADS); + m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_LOADS); } return Status::Success; @@ -125,9 +124,10 @@ Status Runtime::UnloadNetwork(NetworkId networkId) ARMNN_LOG(warning) << "WARNING: Runtime::UnloadNetwork(): " << networkId << " not found!"; return Status::Failure; } - if (profiling::ProfilingService::Instance().IsProfilingEnabled()) + + if (m_ProfilingService.IsProfilingEnabled()) { - profiling::ProfilingService::Instance().IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS); + m_ProfilingService.IncrementCounterValue(armnn::profiling::NETWORK_UNLOADS); } } @@ -158,7 
+158,7 @@ Runtime::Runtime(const CreationOptions& options) ARMNN_LOG(info) << "ArmNN v" << ARMNN_VERSION << "\n"; // pass configuration info to the profiling service - armnn::profiling::ProfilingService::Instance().ConfigureProfilingService(options.m_ProfilingOptions); + m_ProfilingService.ConfigureProfilingService(options.m_ProfilingOptions); // Load any available/compatible dynamic backend before the runtime // goes through the backend registry @@ -185,7 +185,7 @@ Runtime::Runtime(const CreationOptions& options) unique_ptr<armnn::profiling::IBackendProfiling> profilingIface = std::make_unique<armnn::profiling::BackendProfiling>(armnn::profiling::BackendProfiling( - options, armnn::profiling::ProfilingService::Instance(), id)); + options, m_ProfilingService, id)); // Backends may also provide a profiling context. Ask for it now. auto profilingContext = backend->CreateBackendProfilingContext(options, profilingIface); @@ -196,7 +196,7 @@ Runtime::Runtime(const CreationOptions& options) if(profilingContext->EnableProfiling(true)) { // Pass the context onto the profiling service. 
- armnn::profiling::ProfilingService::Instance().AddBackendProfilingContext(id, profilingContext); + m_ProfilingService.AddBackendProfilingContext(id, profilingContext); } else { diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp index 2ad3c9633c..477b1169b1 100644 --- a/src/armnn/Runtime.hpp +++ b/src/armnn/Runtime.hpp @@ -14,6 +14,8 @@ #include <armnn/backends/DynamicBackend.hpp> +#include <ProfilingService.hpp> + #include <mutex> #include <unordered_map> @@ -80,6 +82,8 @@ public: private: friend void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime); // See RuntimeTests.cpp + friend profiling::ProfilingService& GetProfilingService(armnn::Runtime* runtime); // See RuntimeTests.cpp + int GenerateNetworkId(); LoadedNetwork* GetLoadedNetworkPtr(NetworkId networkId) const; @@ -109,6 +113,9 @@ private: /// List of dynamic backends loaded in the runtime std::vector<DynamicBackendPtr> m_DynamicBackends; + + /// Profiling Service Instance + profiling::ProfilingService m_ProfilingService; }; } // namespace armnn diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp index e3cbe03c62..9ced7e907c 100644 --- a/src/armnn/test/RuntimeTests.cpp +++ b/src/armnn/test/RuntimeTests.cpp @@ -30,6 +30,11 @@ void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime) runtime->m_LoadedNetworks.reserve(1); } +profiling::ProfilingService& GetProfilingService(armnn::Runtime* runtime) +{ + return runtime->m_ProfilingService; +} + } BOOST_AUTO_TEST_SUITE(Runtime) @@ -327,7 +332,7 @@ BOOST_AUTO_TEST_CASE(ProfilingDisable) // Create runtime in which the test will run armnn::IRuntime::CreationOptions options; - armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + armnn::Runtime runtime(options); // build up the structure of the network INetworkPtr net(INetwork::Create()); @@ -348,13 +353,13 @@ BOOST_AUTO_TEST_CASE(ProfilingDisable) // optimize the network std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; - IOptimizedNetworkPtr 
optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); + IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec()); // Load it into the runtime. It should succeed. armnn::NetworkId netId; - BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success); + BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success); - profiling::ProfilingServiceRuntimeHelper profilingServiceHelper; + profiling::ProfilingServiceRuntimeHelper profilingServiceHelper(GetProfilingService(&runtime)); profiling::BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager(); auto readableBuffer = bufferManager.GetReadableBuffer(); @@ -370,7 +375,7 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef) // Create runtime in which the test will run armnn::IRuntime::CreationOptions options; options.m_ProfilingOptions.m_EnableProfiling = true; - armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + armnn::Runtime runtime(options); // build up the structure of the network INetworkPtr net(INetwork::Create()); @@ -390,15 +395,15 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef) // optimize the network std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; - IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); + IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec()); ProfilingGuid optNetGuid = optNet->GetGuid(); // Load it into the runtime. It should succeed. 
armnn::NetworkId netId; - BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success); + BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success); - profiling::ProfilingServiceRuntimeHelper profilingServiceHelper; + profiling::ProfilingServiceRuntimeHelper profilingServiceHelper(GetProfilingService(&runtime)); profiling::BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager(); auto readableBuffer = bufferManager.GetReadableBuffer(); @@ -681,15 +686,15 @@ BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef) InputTensors inputTensors { - { 0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data()) } + {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())} }; OutputTensors outputTensors { - { 0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) } + {0, Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data())} }; // Does the inference. - runtime->EnqueueWorkload(netId, inputTensors, outputTensors); + runtime.EnqueueWorkload(netId, inputTensors, outputTensors); // Get readable buffer for inference timeline auto inferenceReadableBuffer = bufferManager.GetReadableBuffer(); |