author     Sadik Armagan <sadik.armagan@arm.com>    2021-03-30 11:05:36 +0100
committer  Sadik Armagan <sadik.armagan@arm.com>    2021-03-30 11:05:36 +0100
commit     a004251094074d7453531a25342d19dd66ee115f (patch)
tree       32f9e14dd679641c27c3e86f1a16073348738466 /src
parent     34b9aba8fb89ec6874d3d72555714955db9c1b72 (diff)
IVGCVSW-5799 'Create Pimpl Idiom for Async prototype'
* Implemented Pimpl Idiom for IAsyncNetwork

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ic7311880563568b014a27f6347f8d41f2ad96df6
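For readers unfamiliar with the pattern: the Pimpl (pointer-to-implementation) idiom keeps a thin public class whose member functions forward to a privately held implementation object, so the implementation's data members and header dependencies stay out of the public API. A minimal, self-contained sketch of the pattern as applied below (the Widget/WidgetImpl names are illustrative only, not ArmNN types):

    #include <memory>

    // Public header: declares only the interface and a forward-declared impl type.
    class Widget
    {
    public:
        Widget();
        ~Widget();                        // defined in the .cpp, where WidgetImpl is complete
        int Compute(int x) const;         // forwards to the implementation
    private:
        class WidgetImpl;                 // forward declaration only
        std::unique_ptr<WidgetImpl> pImpl;
    };

    // Source file: the hidden implementation plus the forwarding bodies.
    class Widget::WidgetImpl
    {
    public:
        int Compute(int x) const { return x * 2; }
    };

    Widget::Widget() : pImpl(new WidgetImpl()) {}
    Widget::~Widget() = default;          // unique_ptr's deleter needs the complete type here
    int Widget::Compute(int x) const { return pImpl->Compute(x); }

In this patch, IAsyncNetwork plays the role of the public class and AsyncNetworkImpl the hidden implementation: each IAsyncNetwork member function forwards through pAsyncNetworkImpl, and the destructor is defaulted in AsyncNetwork.cpp where AsyncNetworkImpl is a complete type.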
Diffstat (limited to 'src')
-rw-r--r--  src/armnn/AsyncNetwork.cpp  67
-rw-r--r--  src/armnn/AsyncNetwork.hpp  22
-rw-r--r--  src/armnn/Runtime.cpp        2
3 files changed, 66 insertions, 25 deletions
diff --git a/src/armnn/AsyncNetwork.cpp b/src/armnn/AsyncNetwork.cpp
index 4698bcf399..4e3838bb5d 100644
--- a/src/armnn/AsyncNetwork.cpp
+++ b/src/armnn/AsyncNetwork.cpp
@@ -26,6 +26,45 @@ namespace armnn
namespace experimental
{
+IAsyncNetwork::IAsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
+ const INetworkProperties& networkProperties,
+ profiling::ProfilingService& profilingService)
+ : pAsyncNetworkImpl( new AsyncNetworkImpl(std::move(net), networkProperties, profilingService)) {};
+
+IAsyncNetwork::~IAsyncNetwork() = default;
+
+TensorInfo IAsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
+{
+ return pAsyncNetworkImpl->GetInputTensorInfo(layerId);
+}
+
+TensorInfo IAsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
+{
+ return pAsyncNetworkImpl->GetOutputTensorInfo(layerId);
+}
+
+Status IAsyncNetwork::Execute(const InputTensors& inputTensors,
+ const OutputTensors& outputTensors,
+ IWorkingMemHandle& workingMemHandle)
+{
+ return pAsyncNetworkImpl->Execute(inputTensors, outputTensors, workingMemHandle);
+}
+
+std::unique_ptr<IWorkingMemHandle> IAsyncNetwork::CreateWorkingMemHandle()
+{
+ return pAsyncNetworkImpl->CreateWorkingMemHandle();
+}
+
+std::shared_ptr<IProfiler> IAsyncNetwork::GetProfiler() const
+{
+ return pAsyncNetworkImpl->GetProfiler();
+}
+
+void IAsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
+{
+ pAsyncNetworkImpl->RegisterDebugCallback(func);
+}
+
void AddLayerStructure(std::unique_ptr<profiling::TimelineUtilityMethods>& timelineUtils,
const Layer& layer,
profiling::ProfilingGuid networkGuid)
@@ -63,7 +102,7 @@ void AddWorkloadStructure(std::unique_ptr<profiling::TimelineUtilityMethods>& ti
profiling::LabelsAndEventClasses::CHILD_GUID);
}
-TensorInfo AsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
+TensorInfo AsyncNetworkImpl::GetInputTensorInfo(LayerBindingId layerId) const
{
for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
{
@@ -77,7 +116,7 @@ TensorInfo AsyncNetwork::GetInputTensorInfo(LayerBindingId layerId) const
throw InvalidArgumentException(fmt::format("No input layer is associated with id {0}}", layerId));
}
-TensorInfo AsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
+TensorInfo AsyncNetworkImpl::GetOutputTensorInfo(LayerBindingId layerId) const
{
for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
{
@@ -93,7 +132,7 @@ TensorInfo AsyncNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
}
// Need something like the collectors to get the correct tensors for the inputs
-void AsyncNetwork::CollectInputTensorHandles(
+void AsyncNetworkImpl::CollectInputTensorHandles(
std::unordered_map<LayerGuid, std::vector<ITensorHandle*> >& tensorHandles,
std::vector<ITensorHandle*>& inputs,
const armnn::Layer* layer,
@@ -128,7 +167,7 @@ void AsyncNetwork::CollectInputTensorHandles(
}
}
-void AsyncNetwork::CreateOutputTensorHandles(
+void AsyncNetworkImpl::CreateOutputTensorHandles(
std::unordered_map<LayerGuid, std::vector<ITensorHandle*> >& tensorHandles,
std::vector<ITensorHandle*>& outputs,
const armnn::Layer* layer,
@@ -156,7 +195,7 @@ void AsyncNetwork::CreateOutputTensorHandles(
tensorHandles.insert({guid, tensorHandleVectors});
}
-const IWorkloadFactory& AsyncNetwork::GetWorkloadFactory(const Layer& layer) const
+const IWorkloadFactory& AsyncNetworkImpl::GetWorkloadFactory(const Layer& layer) const
{
const IWorkloadFactory* workloadFactory = nullptr;
@@ -181,7 +220,9 @@ const IWorkloadFactory& AsyncNetwork::GetWorkloadFactory(const Layer& layer) con
return *workloadFactory;
}
-void AsyncNetwork::EnqueueInput(const BindableLayer& layer, const ConstTensor& inputTensor, WorkingMemHandle& context)
+void AsyncNetworkImpl::EnqueueInput(const BindableLayer& layer,
+ const ConstTensor& inputTensor,
+ WorkingMemHandle& context)
{
if (layer.GetType() != LayerType::Input)
{
@@ -232,7 +273,7 @@ void AsyncNetwork::EnqueueInput(const BindableLayer& layer, const ConstTensor& i
}
}
-void AsyncNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outputTensor, WorkingMemHandle& handle)
+void AsyncNetworkImpl::EnqueueOutput(const BindableLayer& layer, const Tensor& outputTensor, WorkingMemHandle& handle)
{
if (layer.GetType() != LayerType::Output)
{
@@ -304,7 +345,7 @@ void AsyncNetwork::EnqueueOutput(const BindableLayer& layer, const Tensor& outpu
}
}
-AsyncNetwork::AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
+AsyncNetworkImpl::AsyncNetworkImpl(std::unique_ptr<IOptimizedNetwork> net,
const INetworkProperties& networkProperties,
profiling::ProfilingService& profilingService) :
m_OptimizedNetwork(std::move(net)),
@@ -421,7 +462,7 @@ AsyncNetwork::AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
}
}
-Status AsyncNetwork::Execute(const InputTensors& inputTensors,
+Status AsyncNetworkImpl::Execute(const InputTensors& inputTensors,
const OutputTensors& outputTensors,
IWorkingMemHandle& iWorkingMemHandle)
{
@@ -529,12 +570,12 @@ Status AsyncNetwork::Execute(const InputTensors& inputTensors,
}
/// Get the profiler used for this network
-std::shared_ptr<IProfiler> AsyncNetwork::GetProfiler() const
+std::shared_ptr<IProfiler> AsyncNetworkImpl::GetProfiler() const
{
return m_Profiler;
}
-void AsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
+void AsyncNetworkImpl::RegisterDebugCallback(const DebugCallbackFunction& func)
{
for (auto&& workloadPtr: m_WorkloadQueue)
{
@@ -544,7 +585,7 @@ void AsyncNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
/// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
/// overlapped Execution by calling this function from different threads.
-std::unique_ptr<IWorkingMemHandle> AsyncNetwork::CreateWorkingMemHandle()
+std::unique_ptr<IWorkingMemHandle> AsyncNetworkImpl::CreateWorkingMemHandle()
{
Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
std::unordered_map<LayerGuid, std::vector<ITensorHandle*> > tensorHandles;
@@ -592,7 +633,7 @@ std::unique_ptr<IWorkingMemHandle> AsyncNetwork::CreateWorkingMemHandle()
return std::make_unique<WorkingMemHandle>(workingMemDescriptors, workingMemDescriptorMap);
}
-void AsyncNetwork::FreeWorkingMemory()
+void AsyncNetworkImpl::FreeWorkingMemory()
{
// Informs the memory managers to release memory in it's respective memory group
for (auto&& workloadFactory : m_WorkloadFactories)
diff --git a/src/armnn/AsyncNetwork.hpp b/src/armnn/AsyncNetwork.hpp
index 9c525c5472..9bdc7eebd7 100644
--- a/src/armnn/AsyncNetwork.hpp
+++ b/src/armnn/AsyncNetwork.hpp
@@ -29,35 +29,35 @@ namespace armnn
namespace experimental
{
-class AsyncNetwork final : public IAsyncNetwork
+class AsyncNetworkImpl final
{
public:
using WorkloadQueue = std::vector<std::unique_ptr<IWorkload>>;
- AsyncNetwork(std::unique_ptr<IOptimizedNetwork> net,
- const INetworkProperties &networkProperties,
- profiling::ProfilingService &profilingService);
+ AsyncNetworkImpl(std::unique_ptr<IOptimizedNetwork> net,
+ const INetworkProperties &networkProperties,
+ profiling::ProfilingService &profilingService);
- ~AsyncNetwork() { FreeWorkingMemory(); }
+ ~AsyncNetworkImpl() { FreeWorkingMemory(); }
- TensorInfo GetInputTensorInfo(LayerBindingId layerId) const override;
- TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const override;
+ TensorInfo GetInputTensorInfo(LayerBindingId layerId) const;
+ TensorInfo GetOutputTensorInfo(LayerBindingId layerId) const;
/// Thread safe execution of the network. Returns once execution is complete.
/// Will block until this and any other thread using the same workingMem object completes.
virtual Status Execute(const InputTensors& inputTensors,
const OutputTensors& outputTensors,
- IWorkingMemHandle& workingMemHandle) override;
+ IWorkingMemHandle& workingMemHandle);
/// Create a new unique WorkingMemHandle object. Create multiple handles if you wish to have
/// overlapped Execution by calling this function from different threads.
- std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle() override;
+ std::unique_ptr<IWorkingMemHandle> CreateWorkingMemHandle();
/// Get the profiler used for this network
- std::shared_ptr<IProfiler> GetProfiler() const override;
+ std::shared_ptr<IProfiler> GetProfiler() const;
/// Register a debug callback function to be used with this network
- void RegisterDebugCallback(const DebugCallbackFunction& func) override;
+ void RegisterDebugCallback(const DebugCallbackFunction& func);
private:
void FreeWorkingMemory();
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index 5dc1ef9cc5..57aaabd277 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -187,7 +187,7 @@ std::unique_ptr<IAsyncNetwork> RuntimeImpl::CreateAsyncNetwork(NetworkId& networ
context.second->BeforeLoadNetwork(networkIdOut);
}
- unique_ptr<AsyncNetwork> asyncNetwork = std::make_unique<AsyncNetwork>(
+ unique_ptr<IAsyncNetwork> asyncNetwork = std::make_unique<IAsyncNetwork>(
std::unique_ptr<IOptimizedNetwork>(rawNetwork),
networkProperties,
m_ProfilingService);
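With the Runtime change above, callers receive only the IAsyncNetwork interface and never see AsyncNetworkImpl. A caller-side sketch of driving the network through that interface (illustrative only, not code from the patch; it assumes the relevant ArmNN headers for IAsyncNetwork, IWorkingMemHandle, and the tensor types are included):

    #include <memory>

    // Hypothetical helper, not part of the patch: runs one inference using only the
    // methods exposed by the IAsyncNetwork interface shown in this diff.
    armnn::Status RunOnce(armnn::experimental::IAsyncNetwork& network,
                          const armnn::InputTensors& inputs,
                          const armnn::OutputTensors& outputs)
    {
        // One working-memory handle per thread that wants to call Execute() concurrently.
        std::unique_ptr<armnn::experimental::IWorkingMemHandle> workingMem =
            network.CreateWorkingMemHandle();

        return network.Execute(inputs, outputs, *workingMem);
    }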