From a004251094074d7453531a25342d19dd66ee115f Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 30 Mar 2021 11:05:36 +0100
Subject: IVGCVSW-5799 'Create Pimpl Idiom for Async prototype'

* Implemented Pimpl Idiom for IAsyncNetwork

Signed-off-by: Sadik Armagan
Change-Id: Ic7311880563568b014a27f6347f8d41f2ad96df6
---
 src/armnn/AsyncNetwork.cpp | 67 +++++++++++++++++++++++++++++++++++++---
 src/armnn/AsyncNetwork.hpp | 22 +++++++--------
 src/armnn/Runtime.cpp      |  2 +-
 3 files changed, 66 insertions(+), 25 deletions(-)

diff --git a/src/armnn/AsyncNetwork.cpp b/src/armnn/AsyncNetwork.cpp
index 4698bcf399..4e3838bb5d 100644
--- a/src/armnn/AsyncNetwork.cpp
+++ b/src/armnn/AsyncNetwork.cpp
@@ -26,6 +26,45 @@ namespace armnn
 namespace experimental
 {
 
+IAsyncNetwork::IAsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
+                             const INetworkProperties& networkProperties,
+                             profiling::ProfilingService& profilingService)
+    : pAsyncNetworkImpl( new AsyncNetworkImpl(std::move(net), networkProperties, profilingService)) {};
+
+IAsyncNetwork::~IAsyncNetwork() = default;
+
+TensorInfo IAsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
+{
+    return pAsyncNetworkImpl->GetInputTensorInfo(layerId);
+}
+
+TensorInfo IAsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
+{
+    return pAsyncNetworkImpl->GetOutputTensorInfo(layerId);
+}
+
+Status IAsyncNetwork::Execute(const InputTensors& inputTensors,
+                              const OutputTensors& outputTensors,
+                              IWorkingMemHandle& workingMemHandle)
+{
+    return pAsyncNetworkImpl->Execute(inputTensors, outputTensors, workingMemHandle);
+}
+
+std::unique_ptr<IWorkingMemHandle> IAsyncNetwork::CreateWorkingMemHandle()
+{
+    return pAsyncNetworkImpl->CreateWorkingMemHandle();
+}
+
+std::shared_ptr<IProfiler> IAsyncNetwork::GetProfiler() const
+{
+    return pAsyncNetworkImpl->GetProfiler();
+}
+
+void IAsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
+{
+    pAsyncNetworkImpl->RegisterDebugCallback(func);
+}
+
 void AddLayerStructure(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
                        const Layer& layer,
                        profiling::ProfilingGuid networkGuid)
@@ -63,7 +102,7 @@ void AddWorkloadStructure(std::unique_ptr<profiling::TimelineUtilityMethods>& ti
                               profiling::LabelsAndEventClasses::CHILD_GUID);
 }
 
-TensorInfo AsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
+TensorInfo AsyncNetworkImpl::GetInputTensorInfo(LayerBindingId layerId) const
 {
     for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
     {
@@ -77,7 +116,7 @@ TensorInfo AsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
     throw InvalidArgumentException(fmt::format("No input layer is associated with id {0}}", layerId));
 }
 
-TensorInfo AsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
+TensorInfo AsyncNetworkImpl::GetOutputTensorInfo(LayerBindingId layerId) const
 {
     for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
     {
@@ -93,7 +132,7 @@ TensorInfo AsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
 }
 
 // Need something like the collectors to get the correct tensors for the inputs
-void AsyncNetwork::CollectInputTensorHandles(
+void AsyncNetworkImpl::CollectInputTensorHandles(
     std::unordered_map<LayerGuid, std::vector<std::unique_ptr<ITensorHandle> > >& tensorHandles,
     std::vector<ITensorHandle*>& inputs,
     const armnn::Layer* layer,
@@ -128,7 +167,7 @@ void AsyncNetwork::CollectInputTensorHandles(
     }
 }
 
-void AsyncNetwork::CreateOutputTensorHandles(
+void AsyncNetworkImpl::CreateOutputTensorHandles(
     std::unordered_map<LayerGuid, std::vector<std::unique_ptr<ITensorHandle> > >& tensorHandles,
     std::vector<ITensorHandle*>& outputs,
     const armnn::Layer* layer,
@@ -156,7 +195,7 @@ void AsyncNetwork::CreateOutputTensorHandles(
     tensorHandles.insert({guid, tensorHandleVectors});
 }
 
-const IWorkloadFactory& AsyncNetwork::GetWorkloadFactory(const Layer& layer) const
+const IWorkloadFactory& AsyncNetworkImpl::GetWorkloadFactory(const Layer& layer) const
 {
     const IWorkloadFactory* workloadFactory = nullptr;
 
@@ -181,7 +220,9 @@ const IWorkloadFactory& AsyncNetwork::GetWorkloadFactory(const Layer& layer) con
     return *workloadFactory;
 }
 
-void AsyncNetwork::EnqueueInput(const BindableLayer& layer, const ConstTensor& inputTensor, WorkingMemHandle& context)
+void AsyncNetworkImpl::EnqueueInput(const BindableLayer& layer,
+                                    const ConstTensor& inputTensor,
+                                    WorkingMemHandle& context)
 {
     if (layer.GetType() != LayerType::Input)
     {
@@ -232,7 +273,7 @@ void AsyncNetwork::EnqueueInput(const BindableLayer& layer, const ConstTensor& i
     }
 }
 
-void AsyncNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outputTensor, WorkingMemHandle& handle)
+void AsyncNetworkImpl::EnqueueOutput(const BindableLayer& layer, const Tensor& outputTensor, WorkingMemHandle& handle)
 {
     if (layer.GetType() != LayerType::Output)
     {
@@ -304,7 +345,7 @@ void AsyncNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outpu
     }
 }
 
-AsyncNetwork::AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
+AsyncNetworkImpl::AsyncNetworkImpl(std::unique_ptr<IOptimizedNetwork> net,
                            const INetworkProperties& networkProperties,
                            profiling::ProfilingService& profilingService)
     : m_OptimizedNetwork(std::move(net)),
@@ -421,7 +462,7 @@ AsyncNetwork::AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
     }
 }
 
-Status AsyncNetwork::Execute(const InputTensors& inputTensors,
+Status AsyncNetworkImpl::Execute(const InputTensors& inputTensors,
                              const OutputTensors& outputTensors,
                              IWorkingMemHandle& iWorkingMemHandle)
 {
@@ -529,12 +570,12 @@ Status AsyncNetwork::Execute(const InputTensors& inputTensors,
 }
 
 /// Get the profiler used for this network
-std::shared_ptr<IProfiler> AsyncNetwork::GetProfiler() const
+std::shared_ptr<IProfiler> AsyncNetworkImpl::GetProfiler() const
 {
     return m_Profiler;
 }
 
-void AsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
+void AsyncNetworkImpl::RegisterDebugCallback(const DebugCallbackFunction& func)
 {
     for (auto&& workloadPtr: m_WorkloadQueue)
     {
@@ -544,7 +585,7 @@ void AsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
 }
 /// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
 /// overlapped Execution by calling this function from different threads.
-std::unique_ptr<IWorkingMemHandle> AsyncNetwork::CreateWorkingMemHandle()
+std::unique_ptr<IWorkingMemHandle> AsyncNetworkImpl::CreateWorkingMemHandle()
 {
     Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
     std::unordered_map<LayerGuid, std::vector<std::unique_ptr<ITensorHandle> > > tensorHandles;
@@ -592,7 +633,7 @@ std::unique_ptr<IWorkingMemHandle> AsyncNetwork::CreateWorkingMemHandle()
     return std::make_unique<WorkingMemHandle>(workingMemDescriptors, workingMemDescriptorMap);
 }
 
-void AsyncNetwork::FreeWorkingMemory()
+void AsyncNetworkImpl::FreeWorkingMemory()
 {
     // Informs the memory managers to release memory in it's respective memory group
     for (auto&& workloadFactory : m_WorkloadFactories)
diff --git a/src/armnn/AsyncNetwork.hpp b/src/armnn/AsyncNetwork.hpp
index 9c525c5472..9bdc7eebd7 100644
--- a/src/armnn/AsyncNetwork.hpp
+++ b/src/armnn/AsyncNetwork.hpp
@@ -29,35 +29,35 @@ namespace armnn
 namespace experimental
 {
 
-class AsyncNetwork final : public IAsyncNetwork
+class AsyncNetworkImpl final
 {
 public:
     using WorkloadQueue = std::vector<std::unique_ptr<IWorkload>>;
 
-    AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
-                 const INetworkProperties &networkProperties,
-                 profiling::ProfilingService &profilingService);
+    AsyncNetworkImpl(std::unique_ptr<IOptimizedNetwork> net,
+                     const INetworkProperties &networkProperties,
+                     profiling::ProfilingService &profilingService);
 
-    ~AsyncNetwork() { FreeWorkingMemory(); }
+    ~AsyncNetworkImpl() { FreeWorkingMemory(); }
 
-    TensorInfo GetInputTensorInfo(LayerBindingId layerId) const override;
-    TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const override;
+    TensorInfo GetInputTensorInfo(LayerBindingId layerId) const;
+    TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const;
 
     /// Thread safe execution of the network. Returns once execution is complete.
     /// Will block until this and any other thread using the same workingMem object completes.
     virtual Status Execute(const InputTensors& inputTensors,
                            const OutputTensors& outputTensors,
-                           IWorkingMemHandle& workingMemHandle) override;
+                           IWorkingMemHandle& workingMemHandle);
 
     /// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
     /// overlapped Execution by calling this function from different threads.
-    std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle() override;
+    std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle();
 
     /// Get the profiler used for this network
-    std::shared_ptr<IProfiler> GetProfiler() const override;
+    std::shared_ptr<IProfiler> GetProfiler() const;
 
     /// Register a debug callback function to be used with this network
-    void RegisterDebugCallback(const DebugCallbackFunction& func) override;
+    void RegisterDebugCallback(const DebugCallbackFunction& func);
 
 private:
     void FreeWorkingMemory();
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index 5dc1ef9cc5..57aaabd277 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -187,7 +187,7 @@ std::unique_ptr<IAsyncNetwork> RuntimeImpl::CreateAsyncNetwork(NetworkId& networ
         context.second->BeforeLoadNetwork(networkIdOut);
     }
 
-    unique_ptr<AsyncNetwork> asyncNetwork = std::make_unique<AsyncNetwork>(
+    unique_ptr<IAsyncNetwork> asyncNetwork = std::make_unique<IAsyncNetwork>(
         std::unique_ptr<IOptimizedNetwork>(rawNetwork),
         networkProperties,
         m_ProfilingService);
--
cgit v1.2.1
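For context, the sketch below illustrates the pattern this patch applies: IAsyncNetwork becomes a thin public facade that owns a pointer to a hidden AsyncNetworkImpl and forwards every call to it, so the implementation can change without touching the public header. This is only a minimal illustration, not ArmNN code; the names Network, NetworkImpl and Execute are simplified stand-ins, and everything is collapsed into one translation unit so it compiles on its own, whereas in practice (as in the patch) the facade declaration lives in a header and the implementation in a .cpp file.

    #include <iostream>
    #include <memory>
    #include <string>

    // --- What would normally live in the public header ---------------------
    // The facade only needs a forward declaration of the implementation,
    // so clients never see (or recompile against) its internals.
    class NetworkImpl;

    class Network
    {
    public:
        explicit Network(std::string name);
        ~Network();                       // defined where NetworkImpl is complete

        int Execute(int input);           // forwards to the hidden implementation

    private:
        std::unique_ptr<NetworkImpl> m_Impl;
    };

    // --- What would normally live in the .cpp file --------------------------
    class NetworkImpl
    {
    public:
        explicit NetworkImpl(std::string name) : m_Name(std::move(name)) {}

        int Execute(int input)
        {
            std::cout << "Executing " << m_Name << "\n";
            return input * 2;             // stand-in for real work
        }

    private:
        std::string m_Name;
    };

    Network::Network(std::string name)
        : m_Impl(std::make_unique<NetworkImpl>(std::move(name))) {}

    Network::~Network() = default;        // out-of-line, like IAsyncNetwork::~IAsyncNetwork()

    int Network::Execute(int input)
    {
        return m_Impl->Execute(input);    // pure forwarding, mirroring the patch above
    }

    int main()
    {
        Network net("demo");
        std::cout << net.Execute(21) << "\n";   // prints 42
        return 0;
    }

Defaulting the destructor out of line matters because std::unique_ptr needs the complete implementation type at the point of deletion; the patch follows the same rule by defining IAsyncNetwork::~IAsyncNetwork() = default in AsyncNetwork.cpp rather than in the header.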