author     Matthew Sloyan <matthew.sloyan@arm.com>    2022-06-30 17:13:04 +0100
committer  Nikhil Raj <nikhil.raj@arm.com>            2022-07-27 15:52:10 +0100
commit     2d213a759e68f753ef4696e02a8535f7edfe421d
tree       f4003d0a60e907937e04d96fc434c0bfac596f86 /include
parent     28aa6691accfd78c5eb5c4356316220d0e82ddef
download   armnn-2d213a759e68f753ef4696e02a8535f7edfe421d.tar.gz
IVGCVSW-6620 Update the async API to use ExecutionData

* ExecutionData holds a void* which can be assigned to the data required for execution in a backend. The Ref backend uses WorkingMemDescriptors, which hold the TensorHandles for inputs and outputs.
* Updated the ExecuteAsync functions to take ExecutionData.
* Added CreateExecutionData and UpdateExecutionData to IBackendInternal.
* Streamlined the experimental IWorkingMemHandle API by removing the map-related function and the unused m_workingMemDescriptorMap from WorkingMemHandle.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I54b0aab12872011743a141eb42dae200227769af
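For orientation, here is a minimal sketch of the two types involved, inferred only from what this diff shows (ExecutionData exposing a void* m_Data member, WorkingMemDescriptor holding the input and output tensor handles for a layer); the real definitions live in ExecutionData.hpp and WorkingMemDescriptor.hpp and may carry more detail:

    // Illustrative only - member shapes inferred from this diff, not from the full headers.
    #include <vector>

    namespace armnn
    {
    class ITensorHandle;

    namespace experimental
    {
    // Per-layer input and output tensor handles, as used by the Ref backend.
    struct WorkingMemDescriptor
    {
        std::vector<ITensorHandle*> m_Inputs;
        std::vector<ITensorHandle*> m_Outputs;
    };

    // Opaque per-layer execution payload; a backend points m_Data at whatever it
    // needs at execute time (the Ref backend points it at a WorkingMemDescriptor).
    struct ExecutionData
    {
        void* m_Data = nullptr;
    };
    } // namespace experimental
    } // namespace armnn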
Diffstat (limited to 'include')
-rw-r--r--  include/armnn/IWorkingMemHandle.hpp           12
-rw-r--r--  include/armnn/backends/IBackendInternal.hpp   25
-rw-r--r--  include/armnn/backends/IWorkload.hpp           4
-rw-r--r--  include/armnn/backends/MemCopyWorkload.hpp     4
-rw-r--r--  include/armnn/backends/Workload.hpp           10
5 files changed, 42 insertions, 13 deletions
diff --git a/include/armnn/IWorkingMemHandle.hpp b/include/armnn/IWorkingMemHandle.hpp
index bbc4913c59..62f71118eb 100644
--- a/include/armnn/IWorkingMemHandle.hpp
+++ b/include/armnn/IWorkingMemHandle.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -13,6 +13,8 @@ using NetworkId = int;
namespace experimental
{
+struct ExecutionData;
+
struct WorkingMemDescriptor;
class IWorkingMemHandle
@@ -33,12 +35,14 @@ public:
/// IsAllocated returns true if the backing memory is currently allocated.
virtual bool IsAllocated() = 0;
- /// Get the WorkingMemDescriptor for a Layer.
- virtual WorkingMemDescriptor& GetWorkingMemDescriptor(LayerGuid id) = 0;
-
/// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
/// the Workloads in a topologically sorted graph.
virtual WorkingMemDescriptor& GetWorkingMemDescriptorAt(unsigned int id) = 0;
+
+ /// Get the ExecutionData at an index.
+ /// The ExecutionData is paired with a BackendId to be able to call backend specific functions upon it.
+ /// The ExecutionData are stored in the same order as the Workloads in a topologically sorted graph.
+ virtual std::pair<BackendId, ExecutionData>& GetExecutionDataAt(unsigned int id) = 0;
};
} // end experimental namespace
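The pairing of BackendId with ExecutionData is what lets a caller route backend-specific calls for each entry. A hypothetical driver loop (ExecuteWorkloads and its parameters are invented names; the real scheduling lives elsewhere in the runtime) might use the new accessor like this:

    // Hypothetical sketch: walk the topologically sorted workloads and hand each one
    // its matching ExecutionData entry.
    #include <memory>
    #include <utility>
    #include <vector>
    #include <armnn/IWorkingMemHandle.hpp>
    #include <armnn/backends/IWorkload.hpp>

    using namespace armnn;
    using namespace armnn::experimental;

    void ExecuteWorkloads(std::vector<std::unique_ptr<IWorkload>>& workloads,
                          IWorkingMemHandle& workingMemHandle)
    {
        for (unsigned int i = 0; i < workloads.size(); ++i)
        {
            // Entries are stored in the same order as the workloads.
            std::pair<BackendId, ExecutionData>& entry = workingMemHandle.GetExecutionDataAt(i);
            workloads[i]->ExecuteAsync(entry.second);
        }
    }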
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index e393a7e1c5..a18adbac5a 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,7 +9,9 @@
#include <armnn/IRuntime.hpp>
#include <armnn/Deprecated.hpp>
+#include <ExecutionData.hpp>
#include <ISubgraphViewConverter.hpp>
+#include <WorkingMemDescriptor.hpp>
#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>
@@ -205,6 +207,27 @@ public:
///
/// \return - Returns 0 if backend does not support caching otherwise number of files cached
virtual unsigned int GetNumberOfCacheFiles() const { return 0; }
+
+ /// Returns ExecutionData for the backend
+ ///
+ /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
+ /// \return - Returns backend specific ExecutionData generated for a layer
+ virtual ExecutionData CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
+ {
+ IgnoreUnused(workingMemDescriptor);
+ throw armnn::Exception("CreateExecutionData: Function has not been implemented in backend.");
+ };
+
+ /// Update the ExecutionData for a layer. It is used to swap in pre-imported tensor handles
+ ///
+ /// \param executionData - Backend specific ExecutionData generated for a layer
+ /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
+ virtual void UpdateExecutionData(ExecutionData& executionData, WorkingMemDescriptor& workingMemDescriptor) const
+ {
+ IgnoreUnused(executionData);
+ IgnoreUnused(workingMemDescriptor);
+ throw armnn::Exception("UpdateExecutionData: Function has not been implemented in backend.");
+ };
};
using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
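Both new virtuals throw by default, so only backends that opt into the ExecutionData path need to override them. A sketch of what the overrides could look like for a backend that, like the Ref backend, executes straight from WorkingMemDescriptors (HypotheticalRefBackend is an invented name and its class declaration is elided; the real RefBackend implementation may differ):

    // Sketch: wrap the WorkingMemDescriptor in ExecutionData by storing its address.
    ExecutionData HypotheticalRefBackend::CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
    {
        ExecutionData executionData;
        executionData.m_Data = &workingMemDescriptor;
        return executionData;
    }

    // Sketch: swap in a descriptor that references pre-imported tensor handles.
    void HypotheticalRefBackend::UpdateExecutionData(ExecutionData& executionData,
                                                     WorkingMemDescriptor& workingMemDescriptor) const
    {
        executionData.m_Data = &workingMemDescriptor;
    }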
diff --git a/include/armnn/backends/IWorkload.hpp b/include/armnn/backends/IWorkload.hpp
index 22baf92c1a..78c0756945 100644
--- a/include/armnn/backends/IWorkload.hpp
+++ b/include/armnn/backends/IWorkload.hpp
@@ -13,7 +13,7 @@ namespace armnn
namespace experimental
{
-struct WorkingMemDescriptor;
+struct ExecutionData;
} // end experimental namespace
@@ -30,7 +30,7 @@ public:
virtual void Execute() const = 0;
- virtual void ExecuteAsync(WorkingMemDescriptor& desc) = 0;
+ virtual void ExecuteAsync(ExecutionData& executionData) = 0;
virtual arm::pipe::ProfilingGuid GetGuid() const = 0;
diff --git a/include/armnn/backends/MemCopyWorkload.hpp b/include/armnn/backends/MemCopyWorkload.hpp
index da23f52be6..ebf6077def 100644
--- a/include/armnn/backends/MemCopyWorkload.hpp
+++ b/include/armnn/backends/MemCopyWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -17,7 +17,7 @@ class CopyMemGenericWorkload : public BaseWorkload<MemCopyQueueDescriptor>
public:
CopyMemGenericWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& descriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
diff --git a/include/armnn/backends/Workload.hpp b/include/armnn/backends/Workload.hpp
index 6c9fcabd55..be6fbd977f 100644
--- a/include/armnn/backends/Workload.hpp
+++ b/include/armnn/backends/Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -8,6 +8,7 @@
#include "WorkloadData.hpp"
#include "WorkloadInfo.hpp"
#include "WorkingMemDescriptor.hpp"
+#include "ExecutionData.hpp"
#include <armnn/Logging.hpp>
@@ -40,14 +41,15 @@ public:
m_Data.Validate(info);
}
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override
+ void ExecuteAsync(ExecutionData& executionData) override
{
ARMNN_LOG(info) << "Using default async workload execution, this will affect network performance";
#if !defined(ARMNN_DISABLE_THREADS)
std::lock_guard<std::mutex> lockGuard(m_AsyncWorkloadMutex);
#endif
- m_Data.m_Inputs = workingMemDescriptor.m_Inputs;
- m_Data.m_Outputs = workingMemDescriptor.m_Outputs;
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ m_Data.m_Inputs = workingMemDescriptor->m_Inputs;
+ m_Data.m_Outputs = workingMemDescriptor->m_Outputs;
Execute();
};
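Putting the pieces together, the intended call sequence is roughly the following (PrepareAndRun and its arguments are invented for illustration; error handling and profiling are omitted):

    // End-to-end sketch of the new async flow.
    #include <armnn/backends/IBackendInternal.hpp>
    #include <armnn/backends/IWorkload.hpp>

    using namespace armnn;
    using namespace armnn::experimental;

    void PrepareAndRun(IBackendInternal& backend,
                       IWorkload& workload,
                       WorkingMemDescriptor& workingMemDescriptor)
    {
        // The backend packs whatever it needs at execute time into ExecutionData.
        ExecutionData executionData = backend.CreateExecutionData(workingMemDescriptor);

        // Pre-imported tensor handles can later be swapped in without rebuilding the entry.
        backend.UpdateExecutionData(executionData, workingMemDescriptor);

        // Workloads now receive ExecutionData instead of a raw WorkingMemDescriptor.
        workload.ExecuteAsync(executionData);
    }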