aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatthew Sloyan <matthew.sloyan@arm.com>2022-06-30 17:13:04 +0100
committerMatthew Sloyan <matthew.sloyan@arm.com>2022-07-13 08:31:46 +0000
commit21a6a1a5b72907573eade6d232bfaf45a4c14c52 (patch)
treef4003d0a60e907937e04d96fc434c0bfac596f86
parent8b0bee159b970c2aaffcdd22fa61c4106b5607e3 (diff)
downloadarmnn-21a6a1a5b72907573eade6d232bfaf45a4c14c52.tar.gz
IVGCVSW-6620 Update the async api to use ExecutionData
* ExecutionData holds a void* which can be assigned to data required for execution in a backend. WorkingMemDescriptors are used in the Ref backend which hold TensorHandles for inputs and outputs. * Updated ExecuteAsync functions to take ExecutionData. * Added CreateExecutionData and UpdateExecutionData to IBackendInternal. * Streamlined experimental IWorkingMemHandle API by removing map related function and unused m_workingMemDescriptorMap from WorkingMemHandle. Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com> Change-Id: I54b0aab12872011743a141eb42dae200227769af
-rw-r--r--CMakeLists.txt1
-rw-r--r--include/armnn/IWorkingMemHandle.hpp12
-rw-r--r--include/armnn/backends/IBackendInternal.hpp25
-rw-r--r--include/armnn/backends/IWorkload.hpp4
-rw-r--r--include/armnn/backends/MemCopyWorkload.hpp4
-rw-r--r--include/armnn/backends/Workload.hpp10
-rw-r--r--src/armnn/ExecutionData.hpp21
-rw-r--r--src/armnn/LoadedNetwork.cpp47
-rw-r--r--src/armnn/WorkingMemHandle.cpp19
-rw-r--r--src/armnn/WorkingMemHandle.hpp32
-rw-r--r--src/backends/backendsCommon/MemCopyWorkload.cpp9
-rw-r--r--src/backends/backendsCommon/MemSyncWorkload.cpp10
-rw-r--r--src/backends/backendsCommon/MemSyncWorkload.hpp4
-rw-r--r--src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp34
-rw-r--r--src/backends/reference/RefBackend.cpp14
-rw-r--r--src/backends/reference/RefBackend.hpp6
-rw-r--r--src/backends/reference/workloads/RefActivationWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefActivationWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefArgMinMaxWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefArgMinMaxWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefCastWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefCastWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefChannelShuffleWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefChannelShuffleWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefComparisonWorkload.cpp9
-rw-r--r--src/backends/reference/workloads/RefComparisonWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConcatWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConcatWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConstantWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConstantWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConvertFp16ToFp32Workload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConvertFp32ToFp16Workload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConvolution2dWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConvolution2dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefConvolution3dWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefConvolution3dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefDebugWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefDebugWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefDequantizeWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefDequantizeWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefElementwiseWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefElementwiseWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp7
-rw-r--r--src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.hpp4
-rw-r--r--src/backends/reference/workloads/RefFillWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefFillWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefFloorWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefFloorWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefFullyConnectedWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefFullyConnectedWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefGatherNdWorkload.cpp5
-rw-r--r--src/backends/reference/workloads/RefGatherNdWorkload.hpp2
-rw-r--r--src/backends/reference/workloads/RefGatherWorkload.cpp5
-rw-r--r--src/backends/reference/workloads/RefGatherWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefInstanceNormalizationWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefL2NormalizationWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefL2NormalizationWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefLstmWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefLstmWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefMeanWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefMeanWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefNormalizationWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefNormalizationWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefPadWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefPadWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefPermuteWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefPermuteWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefPooling2dWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefPooling2dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefPooling3dWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefPooling3dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefPreluWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefPreluWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefQLstmWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefQLstmWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefQuantizeWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefQuantizeWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefRankWorkload.hpp7
-rw-r--r--src/backends/reference/workloads/RefReduceWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefReduceWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefReshapeWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefReshapeWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefResizeWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefResizeWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefShapeWorkload.hpp7
-rw-r--r--src/backends/reference/workloads/RefSliceWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefSliceWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefSoftmaxWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefSoftmaxWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefSplitterWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefSplitterWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefStackWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefStackWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefStridedSliceWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefStridedSliceWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefTransposeWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefTransposeWorkload.hpp4
-rw-r--r--src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp7
-rw-r--r--src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.hpp4
130 files changed, 525 insertions, 353 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f115f4686b..75064528c0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -340,6 +340,7 @@ list(APPEND armnn_sources
src/armnn/DeviceSpec.hpp
src/armnn/DllExport.hpp
src/armnn/Exceptions.cpp
+ src/armnn/ExecutionData.hpp
src/armnn/ExecutionFrame.cpp
src/armnn/ExecutionFrame.hpp
src/armnn/Graph.cpp
diff --git a/include/armnn/IWorkingMemHandle.hpp b/include/armnn/IWorkingMemHandle.hpp
index bbc4913c59..62f71118eb 100644
--- a/include/armnn/IWorkingMemHandle.hpp
+++ b/include/armnn/IWorkingMemHandle.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -13,6 +13,8 @@ using NetworkId = int;
namespace experimental
{
+struct ExecutionData;
+
struct WorkingMemDescriptor;
class IWorkingMemHandle
@@ -33,12 +35,14 @@ public:
/// IsAllocated returns true if the backing memory is currently allocated.
virtual bool IsAllocated() = 0;
- /// Get the WorkingMemDescriptor for a Layer.
- virtual WorkingMemDescriptor& GetWorkingMemDescriptor(LayerGuid id) = 0;
-
/// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
/// the Workloads in a topologically sorted graph.
virtual WorkingMemDescriptor& GetWorkingMemDescriptorAt(unsigned int id) = 0;
+
+ /// Get the ExecutionData at an index.
+ /// The ExecutionData is paired with a BackendId to be able to call backend specific functions upon it.
+ /// The ExecutionData are stored in the same order as the Workloads in a topologically sorted graph.
+ virtual std::pair<BackendId, ExecutionData>& GetExecutionDataAt(unsigned int id) = 0;
};
} // end experimental namespace
diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index e393a7e1c5..a18adbac5a 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -9,7 +9,9 @@
#include <armnn/IRuntime.hpp>
#include <armnn/Deprecated.hpp>
+#include <ExecutionData.hpp>
#include <ISubgraphViewConverter.hpp>
+#include <WorkingMemDescriptor.hpp>
#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>
@@ -205,6 +207,27 @@ public:
///
/// \return - Returns 0 if backend does not support caching otherwise number of files cached
virtual unsigned int GetNumberOfCacheFiles() const { return 0; }
+
+ /// Returns ExecutionData for the backend
+ ///
+ /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
+ /// \return - Returns backend specific ExecutionData generated for a layer
+ virtual ExecutionData CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
+ {
+ IgnoreUnused(workingMemDescriptor);
+ throw armnn::Exception("CreateExecutionData: Function has not been implemented in backend.");
+ };
+
+ /// Update the ExecutionData for a layer. It is used to swap in pre-imported tensor handles
+ ///
+ /// \param executionData - Backend specific ExecutionData generated for a layer
+ /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
+ virtual void UpdateExecutionData(ExecutionData& executionData, WorkingMemDescriptor& workingMemDescriptor) const
+ {
+ IgnoreUnused(executionData);
+ IgnoreUnused(workingMemDescriptor);
+ throw armnn::Exception("UpdateExecutionData: Function has not been implemented in backend.");
+ };
};
using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
diff --git a/include/armnn/backends/IWorkload.hpp b/include/armnn/backends/IWorkload.hpp
index 22baf92c1a..78c0756945 100644
--- a/include/armnn/backends/IWorkload.hpp
+++ b/include/armnn/backends/IWorkload.hpp
@@ -13,7 +13,7 @@ namespace armnn
namespace experimental
{
-struct WorkingMemDescriptor;
+struct ExecutionData;
} // end experimental namespace
@@ -30,7 +30,7 @@ public:
virtual void Execute() const = 0;
- virtual void ExecuteAsync(WorkingMemDescriptor& desc) = 0;
+ virtual void ExecuteAsync(ExecutionData& executionData) = 0;
virtual arm::pipe::ProfilingGuid GetGuid() const = 0;
diff --git a/include/armnn/backends/MemCopyWorkload.hpp b/include/armnn/backends/MemCopyWorkload.hpp
index da23f52be6..ebf6077def 100644
--- a/include/armnn/backends/MemCopyWorkload.hpp
+++ b/include/armnn/backends/MemCopyWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -17,7 +17,7 @@ class CopyMemGenericWorkload : public BaseWorkload<MemCopyQueueDescriptor>
public:
CopyMemGenericWorkload(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& descriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>;
diff --git a/include/armnn/backends/Workload.hpp b/include/armnn/backends/Workload.hpp
index 6c9fcabd55..be6fbd977f 100644
--- a/include/armnn/backends/Workload.hpp
+++ b/include/armnn/backends/Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -8,6 +8,7 @@
#include "WorkloadData.hpp"
#include "WorkloadInfo.hpp"
#include "WorkingMemDescriptor.hpp"
+#include "ExecutionData.hpp"
#include <armnn/Logging.hpp>
@@ -40,14 +41,15 @@ public:
m_Data.Validate(info);
}
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override
+ void ExecuteAsync(ExecutionData& executionData) override
{
ARMNN_LOG(info) << "Using default async workload execution, this will affect network performance";
#if !defined(ARMNN_DISABLE_THREADS)
std::lock_guard<std::mutex> lockGuard(m_AsyncWorkloadMutex);
#endif
- m_Data.m_Inputs = workingMemDescriptor.m_Inputs;
- m_Data.m_Outputs = workingMemDescriptor.m_Outputs;
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ m_Data.m_Inputs = workingMemDescriptor->m_Inputs;
+ m_Data.m_Outputs = workingMemDescriptor->m_Outputs;
Execute();
};
diff --git a/src/armnn/ExecutionData.hpp b/src/armnn/ExecutionData.hpp
new file mode 100644
index 0000000000..69c8f3fa7a
--- /dev/null
+++ b/src/armnn/ExecutionData.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+namespace armnn
+{
+
+namespace experimental
+{
+
+struct ExecutionData
+{
+ void* m_Data;
+};
+
+} // end experimental namespace
+
+} // end armnn namespace
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 8e664e699d..5dd7b6cd2a 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -9,12 +9,14 @@
#include "Profiling.hpp"
#include "HeapProfiling.hpp"
#include "WorkingMemHandle.hpp"
+#include "ExecutionData.hpp"
#include <armnn/BackendHelper.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
#include <armnn/backends/TensorHandle.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
@@ -642,9 +644,13 @@ void LoadedNetwork::AllocateAndExecuteConstantWorkloadsAsync()
m_ConstantTensorHandles[layer->GetGuid()] = tensorHandle;
tensorHandle->Allocate();
+ auto& backend = m_Backends.at(layer->GetBackendId());
+
WorkingMemDescriptor memDesc;
memDesc.m_Outputs.push_back(tensorHandle);
- m_ConstantWorkloads[layer->GetGuid()]->ExecuteAsync(memDesc);
+
+ ExecutionData executionData = backend->CreateExecutionData(memDesc);
+ m_ConstantWorkloads[layer->GetGuid()]->ExecuteAsync(executionData);
}
}
}
@@ -1717,15 +1723,15 @@ void LoadedNetwork::ClearImportedOutputs(const std::vector<ImportedOutputId> out
throw InvalidArgumentException(fmt::format("ClearImportedOutputs::Unknown ImportedOutputId: {}", id));
}
- auto& importedTensorHandle = m_PreImportedOutputHandles[id].m_TensorHandle;
- if (!importedTensorHandle)
- {
- throw InvalidArgumentException(
- fmt::format("ClearImportedOutputs::ImportedOutput with id: {} has already been deleted", id));
- }
- // Call Unimport then destroy the tensorHandle
- importedTensorHandle->Unimport();
- importedTensorHandle = {};
+ auto& importedTensorHandle = m_PreImportedOutputHandles[id].m_TensorHandle;
+ if (!importedTensorHandle)
+ {
+ throw InvalidArgumentException(
+ fmt::format("ClearImportedOutputs::ImportedOutput with id: {} has already been deleted", id));
+ }
+ // Call Unimport then destroy the tensorHandle
+ importedTensorHandle->Unimport();
+ importedTensorHandle = {};
}
}
@@ -1882,7 +1888,6 @@ Status LoadedNetwork::Execute(const InputTensors& inputTensors,
const auto& preimportedHandle = importedOutputPin.m_TensorHandle;
auto outputConnections = workingMemHandle.GetOutputConnection(layerBindingId);
-
for (auto it : outputConnections)
{
*it = preimportedHandle.get();
@@ -1895,7 +1900,7 @@ Status LoadedNetwork::Execute(const InputTensors& inputTensors,
ARMNN_LOG(error) << "An error occurred attempting to execute a workload: " << error.what();
executionSucceeded = false;
};
- ProfilingDynamicGuid workloadInferenceID(0);
+ ProfilingDynamicGuid workloadInferenceID(0);
try
{
@@ -1907,7 +1912,8 @@ Status LoadedNetwork::Execute(const InputTensors& inputTensors,
workloadInferenceID = timelineUtils->RecordWorkloadInferenceAndStartOfLifeEvent(workload->GetGuid(),
inferenceGuid);
}
- workload->ExecuteAsync(workingMemHandle.GetWorkingMemDescriptorAt(i));
+
+ workload->ExecuteAsync(workingMemHandle.GetExecutionDataAt(i).second);
if (timelineUtils)
{
@@ -1961,7 +1967,7 @@ std::unique_ptr<IWorkingMemHandle> LoadedNetwork::CreateWorkingMemHandle(Network
std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles;
std::vector<WorkingMemDescriptor> workingMemDescriptors;
- std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap;
+ std::vector<std::pair<BackendId, ExecutionData>> executionDataVec;
auto GetTensorHandle = [&](Layer* layer, const OutputSlot& outputSlot)
{
@@ -2142,13 +2148,19 @@ std::unique_ptr<IWorkingMemHandle> LoadedNetwork::CreateWorkingMemHandle(Network
handleInfo.m_InputMemDescriptorCoords.m_InputSlotCoords.emplace_back(connectionLocation);
}
}
- workingMemDescriptorMap.insert({layer->GetGuid(), workingMemDescriptor});
// Input/Output layers/workloads will not be executed, so the descriptor is not added to workingMemDescriptors
// However we will still need to manage the tensorHandle
if (!isInputLayer)
{
+ // Simply auto initialise ExecutionData here, so it's added only for the layers that require execution.
+ // The memory and data will be allocated/assigned for the void* in WorkingMemHandle::Allocate.
+ std::pair<BackendId, ExecutionData> dataPair;
+ dataPair.first = layer->GetBackendId();
+
+ executionDataVec.push_back(dataPair);
workingMemDescriptors.push_back(workingMemDescriptor);
+
layerIndex++;
}
}
@@ -2185,11 +2197,12 @@ std::unique_ptr<IWorkingMemHandle> LoadedNetwork::CreateWorkingMemHandle(Network
inputConnectionsInfo,
outputConnectionsInfo,
workingMemDescriptors,
- workingMemDescriptorMap,
std::move(externalMemoryManager),
std::move(tensorMemory),
std::move(managedTensorHandles),
- std::move(unmanagedTensorHandles));
+ std::move(unmanagedTensorHandles),
+ executionDataVec,
+ &m_Backends);
}
void LoadedNetwork::RegisterDebugCallback(const DebugCallbackFunction& func)
diff --git a/src/armnn/WorkingMemHandle.cpp b/src/armnn/WorkingMemHandle.cpp
index 70bdd59373..e800c428e6 100644
--- a/src/armnn/WorkingMemHandle.cpp
+++ b/src/armnn/WorkingMemHandle.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,20 +19,22 @@ WorkingMemHandle::WorkingMemHandle(NetworkId networkId,
std::vector<InputMemDescriptorCoords> inputLayerInfo,
std::vector<OutputMemDescriptorCoords> outputLayerInfo,
std::vector<WorkingMemDescriptor> workingMemDescriptors,
- std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap,
std::unique_ptr<MemoryManager> memoryManager,
std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory,
std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
- std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles)
+ std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles,
+ std::vector<std::pair<BackendId, ExecutionData>> executionDataVec,
+ BackendPtrMap* backends)
: m_NetworkId(networkId)
, m_WorkingMemDescriptors(workingMemDescriptors)
- , m_WorkingMemDescriptorMap(workingMemDescriptorMap)
, m_MemoryManager(std::move(memoryManager))
, m_TensorMemory(std::move(tensorMemory))
, m_ManagedTensorHandles(std::move(managedTensorHandles))
, m_UnmanagedTensorHandles(std::move(unmanagedTensorHandles))
, m_InputSize(numeric_cast<DifferenceType>(inputLayerInfo.size()))
, m_IsAllocated(false)
+ , m_ExecutionDataVec(executionDataVec)
+ , m_Backends(backends)
{
for (const auto& inputInfo : inputLayerInfo)
{
@@ -109,6 +111,15 @@ void WorkingMemHandle::Allocate()
{
m_ManagedTensorHandles[i]->Import(m_TensorMemory[i].first->m_Data, m_TensorMemory[i].second);
}
+
+ // Assign previously allocated ExecutionData. Needs to be assigned after allocation so the void* are allocated.
+ for (unsigned int i = 0; i < m_ExecutionDataVec.size(); ++i)
+ {
+ auto& backend = m_Backends->at(m_ExecutionDataVec[i].first);
+
+ ExecutionData executionData = backend->CreateExecutionData(GetWorkingMemDescriptorAt(i));
+ m_ExecutionDataVec[i].second = executionData;
+ }
}
void WorkingMemHandle::Free()
diff --git a/src/armnn/WorkingMemHandle.hpp b/src/armnn/WorkingMemHandle.hpp
index bca1d2d80c..dc62b9a2f5 100644
--- a/src/armnn/WorkingMemHandle.hpp
+++ b/src/armnn/WorkingMemHandle.hpp
@@ -1,16 +1,18 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
+#include "ExecutionData.hpp"
#include "Layer.hpp"
#include "Network.hpp"
#include "WorkingMemDescriptor.hpp"
#include <armnn/IWorkingMemHandle.hpp>
#include <armnn/Tensor.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
#include <unordered_map>
#include <mutex>
@@ -22,6 +24,7 @@ namespace armnn
namespace experimental
{
+using BackendPtrMap = std::unordered_map<BackendId, IBackendInternalUniquePtr>;
class WorkingMemHandle final : public IWorkingMemHandle
{
@@ -48,11 +51,12 @@ public:
std::vector<InputMemDescriptorCoords> inputLayerInfo,
std::vector<OutputMemDescriptorCoords> outputLayerInfo,
std::vector<WorkingMemDescriptor> workingMemDescriptors,
- std::unordered_map<LayerGuid, WorkingMemDescriptor> workingMemDescriptorMap,
std::unique_ptr<MemoryManager> memoryManager,
std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> tensorMemory,
std::vector<std::unique_ptr<ITensorHandle>> managedTensorHandles,
- std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles);
+ std::vector<std::unique_ptr<ITensorHandle>> unmanagedTensorHandles,
+ std::vector<std::pair<BackendId, ExecutionData>> executionDataVec,
+ BackendPtrMap* backends);
~WorkingMemHandle()
{ Free(); }
@@ -75,14 +79,6 @@ public:
return m_IsAllocated;
}
- /// Get the WorkingMemDescriptor for a Layer.
- WorkingMemDescriptor& GetWorkingMemDescriptor(LayerGuid id) override
- {
- auto result = m_WorkingMemDescriptorMap.find(id);
- ARMNN_ASSERT(result != m_WorkingMemDescriptorMap.end());
- return result->second;
- }
-
/// Get the WorkingMemDescriptor at an index. The WorkingMemDescriptors are stored in the same order as
/// the Workloads in a topologically sorted graph.
WorkingMemDescriptor& GetWorkingMemDescriptorAt(unsigned int id) override
@@ -90,6 +86,14 @@ public:
return m_WorkingMemDescriptors[id];
}
+ /// Get the ExecutionData at an index.
+ /// The ExecutionData is paired with a BackendId to be able to call backend specific functions upon it.
+ /// The ExecutionData are stored in the same order as the Workloads in a topologically sorted graph.
+ std::pair<BackendId, ExecutionData>& GetExecutionDataAt(unsigned int id) override
+ {
+ return m_ExecutionDataVec[id];
+ }
+
ITensorHandle* GetInputHandle(LayerBindingId layerBindingId) const
{
return m_InputHandleMap.at(layerBindingId);
@@ -129,14 +133,12 @@ private:
std::unordered_map<LayerBindingId, std::vector<std::vector<ITensorHandle*>::iterator>> m_OutputConnectionMap;
std::vector<WorkingMemDescriptor> m_WorkingMemDescriptors;
- std::unordered_map<LayerGuid, WorkingMemDescriptor> m_WorkingMemDescriptorMap;
std::unique_ptr<MemoryManager> m_MemoryManager;
// Memory to be imported into the tensorHandles after allocation
std::vector<std::pair<std::shared_ptr<TensorMemory>, MemorySource>> m_TensorMemory;
-
// Tensors that will need to be allocated internally within armnn
std::vector<std::unique_ptr<ITensorHandle>> m_ManagedTensorHandles;
@@ -151,6 +153,10 @@ private:
DifferenceType m_InputSize;
bool m_IsAllocated;
+
+ std::vector<std::pair<BackendId, ExecutionData>> m_ExecutionDataVec;
+
+ BackendPtrMap* m_Backends;
};
} // end experimental namespace
diff --git a/src/backends/backendsCommon/MemCopyWorkload.cpp b/src/backends/backendsCommon/MemCopyWorkload.cpp
index 09d0e6c7d1..3e0782aa80 100644
--- a/src/backends/backendsCommon/MemCopyWorkload.cpp
+++ b/src/backends/backendsCommon/MemCopyWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -63,11 +63,14 @@ void CopyMemGenericWorkload::Execute() const
}
}
-void CopyMemGenericWorkload::ExecuteAsync(WorkingMemDescriptor& descriptor)
+void CopyMemGenericWorkload::ExecuteAsync(ExecutionData& executionData)
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "CopyMemGeneric_Execute_WorkingMemDescriptor");
+
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
std::vector<TensorHandlePair> tensorHandlePairs;
- GatherTensorHandlePairs(descriptor, tensorHandlePairs);
+
+ GatherTensorHandlePairs(*workingMemDescriptor, tensorHandlePairs);
auto copyFunc = [](void* dst, const void* src, size_t size)
{
diff --git a/src/backends/backendsCommon/MemSyncWorkload.cpp b/src/backends/backendsCommon/MemSyncWorkload.cpp
index af68306557..79df2d23dc 100644
--- a/src/backends/backendsCommon/MemSyncWorkload.cpp
+++ b/src/backends/backendsCommon/MemSyncWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,11 +27,13 @@ void SyncMemGenericWorkload::Execute() const
m_TensorHandle->Unmap();
}
-void SyncMemGenericWorkload::ExecuteAsync(WorkingMemDescriptor& descriptor)
+void SyncMemGenericWorkload::ExecuteAsync(ExecutionData& executionData)
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "SyncMemGeneric_Execute_WorkingMemDescriptor");
- descriptor.m_Inputs[0]->Map(true);
- descriptor.m_Inputs[0]->Unmap();
+
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ workingMemDescriptor->m_Inputs[0]->Map(true);
+ workingMemDescriptor->m_Inputs[0]->Unmap();
}
} //namespace armnn
diff --git a/src/backends/backendsCommon/MemSyncWorkload.hpp b/src/backends/backendsCommon/MemSyncWorkload.hpp
index eb2651731c..df54a654c7 100644
--- a/src/backends/backendsCommon/MemSyncWorkload.hpp
+++ b/src/backends/backendsCommon/MemSyncWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -19,7 +19,7 @@ class SyncMemGenericWorkload : public BaseWorkload<MemSyncQueueDescriptor>
public:
SyncMemGenericWorkload(const MemSyncQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& descriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
ITensorHandle* m_TensorHandle;
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index b1ee89ac3c..0863ee45ca 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -46,13 +46,14 @@ struct Workload0 : BaseWorkload<ElementwiseUnaryQueueDescriptor>
}
}
- void ExecuteAsync(WorkingMemDescriptor& desc)
+ void ExecuteAsync(ExecutionData& executionData)
{
- int* inVals = static_cast<int*>(desc.m_Inputs[0][0].Map());
- int* outVals = static_cast<int*>(desc.m_Outputs[0][0].Map());
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ int* inVals = static_cast<int*>(workingMemDescriptor->m_Inputs[0][0].Map());
+ int* outVals = static_cast<int*>(workingMemDescriptor->m_Outputs[0][0].Map());
for (unsigned int i = 0;
- i < desc.m_Inputs[0][0].GetShape().GetNumElements();
+ i < workingMemDescriptor->m_Inputs[0][0].GetShape().GetNumElements();
++i)
{
outVals[i] = inVals[i] + outVals[i];
@@ -147,7 +148,10 @@ TEST_CASE("TestAsyncExecute")
workingMemDescriptor0.m_Inputs = std::vector<ITensorHandle*>{&asyncInput0};
workingMemDescriptor0.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput0};
- workload0.get()->ExecuteAsync(workingMemDescriptor0);
+ ExecutionData executionData;
+ executionData.m_Data = &workingMemDescriptor0;
+
+ workload0.get()->ExecuteAsync(executionData);
// Inputs are also changed by the execute/executeAsync calls to make sure there is no interference with them
ValidateTensor(workingMemDescriptor0.m_Outputs[0], expectedExecuteAsyncval);
@@ -183,7 +187,10 @@ TEST_CASE("TestDefaultAsyncExecute")
workingMemDescriptor.m_Inputs = std::vector<ITensorHandle*>{&asyncInput};
workingMemDescriptor.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput};
- workload1.get()->ExecuteAsync(workingMemDescriptor);
+ ExecutionData executionData;
+ executionData.m_Data = &workingMemDescriptor;
+
+ workload1.get()->ExecuteAsync(executionData);
// workload1 has no AsyncExecute implementation and so should use the default workload AsyncExecute
// implementation which will call workload1.Execute() in a thread safe manner
@@ -225,6 +232,8 @@ TEST_CASE("TestDefaultAsyncExeuteWithThreads")
workingMemDescriptor1.m_Inputs = std::vector<ITensorHandle*>{&asyncInput1};
workingMemDescriptor1.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput1};
+ ExecutionData executionData1;
+ executionData1.m_Data = &workingMemDescriptor1;
ScopedTensorHandle asyncInput2(constInputTensor2);
ScopedTensorHandle asyncOutput2(constOutputTensor2);
@@ -233,16 +242,19 @@ TEST_CASE("TestDefaultAsyncExeuteWithThreads")
workingMemDescriptor2.m_Inputs = std::vector<ITensorHandle*>{&asyncInput2};
workingMemDescriptor2.m_Outputs = std::vector<ITensorHandle*>{&asyncOutput2};
+ ExecutionData executionData2;
+ executionData2.m_Data = &workingMemDescriptor2;
+
std::thread thread1 = std::thread([&]()
{
- workload.get()->ExecuteAsync(workingMemDescriptor1);
- workload.get()->ExecuteAsync(workingMemDescriptor1);
+ workload.get()->ExecuteAsync(executionData1);
+ workload.get()->ExecuteAsync(executionData1);
});
std::thread thread2 = std::thread([&]()
{
- workload.get()->ExecuteAsync(workingMemDescriptor2);
- workload.get()->ExecuteAsync(workingMemDescriptor2);
+ workload.get()->ExecuteAsync(executionData2);
+ workload.get()->ExecuteAsync(executionData2);
});
thread1.join();
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index c38d6b6710..a33a7756a0 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -103,4 +103,16 @@ std::unique_ptr<ICustomAllocator> RefBackend::GetDefaultAllocator() const
return std::make_unique<DefaultAllocator>();
}
+ExecutionData RefBackend::CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
+{
+ ExecutionData executionData;
+ executionData.m_Data = &workingMemDescriptor;
+ return executionData;
+}
+
+void RefBackend::UpdateExecutionData(ExecutionData& executionData, WorkingMemDescriptor& workingMemDescriptor) const
+{
+ executionData.m_Data = &workingMemDescriptor;
+}
+
} // namespace armnn
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index da04f22d93..9828d09f51 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -62,6 +62,10 @@ public:
};
std::unique_ptr<ICustomAllocator> GetDefaultAllocator() const override;
+
+ ExecutionData CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const override;
+
+ void UpdateExecutionData(ExecutionData& executionData, WorkingMemDescriptor& workingMemDescriptor) const override;
};
} // namespace armnn
diff --git a/src/backends/reference/workloads/RefActivationWorkload.cpp b/src/backends/reference/workloads/RefActivationWorkload.cpp
index 77958673e9..bdc637aa5e 100644
--- a/src/backends/reference/workloads/RefActivationWorkload.cpp
+++ b/src/backends/reference/workloads/RefActivationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,9 +20,10 @@ void RefActivationWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefActivationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefActivationWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefActivationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefActivationWorkload.hpp b/src/backends/reference/workloads/RefActivationWorkload.hpp
index 8dc2d52d9b..f09c928732 100644
--- a/src/backends/reference/workloads/RefActivationWorkload.hpp
+++ b/src/backends/reference/workloads/RefActivationWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefActivationWorkload : public RefBaseWorkload<ActivationQueueDescriptor>
public:
using RefBaseWorkload<ActivationQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index d724273287..910ea73644 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -24,9 +24,10 @@ void RefArgMinMaxWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefArgMinMaxWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefArgMinMaxWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefArgMinMaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.hpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.hpp
index 97c4b45d60..000513b495 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.hpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
index a6bd986f1d..ed99c63b64 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,9 +27,10 @@ void RefBatchNormalizationWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefBatchNormalizationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefBatchNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefBatchNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp b/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
index 60dd2a927c..88f0e3443a 100644
--- a/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
+++ b/src/backends/reference/workloads/RefBatchNormalizationWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
explicit RefBatchNormalizationWorkload(const BatchNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
index 441d2ba2cf..72c7a7687e 100644
--- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,9 +16,10 @@ void RefBatchToSpaceNdWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefBatchToSpaceNdWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefBatchToSpaceNdWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefBatchToSpaceNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
index d7ee6fc81c..ac6aad3eb2 100644
--- a/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
+++ b/src/backends/reference/workloads/RefBatchToSpaceNdWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
using RefBaseWorkload<BatchToSpaceNdQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefCastWorkload.cpp b/src/backends/reference/workloads/RefCastWorkload.cpp
index 8f2a7259f1..5dce5d9a86 100644
--- a/src/backends/reference/workloads/RefCastWorkload.cpp
+++ b/src/backends/reference/workloads/RefCastWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,9 +31,10 @@ void RefCastWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefCastWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefCastWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefCastWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefCastWorkload.hpp b/src/backends/reference/workloads/RefCastWorkload.hpp
index 6f7e56a6b6..39963c6c0e 100644
--- a/src/backends/reference/workloads/RefCastWorkload.hpp
+++ b/src/backends/reference/workloads/RefCastWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,7 +18,7 @@ class RefCastWorkload : public RefBaseWorkload<CastQueueDescriptor>
public:
using RefBaseWorkload<CastQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
index 9f8514d009..8d317ba333 100644
--- a/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
+++ b/src/backends/reference/workloads/RefChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefChannelShuffleWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefChannelShuffleWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefChannelShuffleWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
// Reference implementation for channel shuffle taken from
diff --git a/src/backends/reference/workloads/RefChannelShuffleWorkload.hpp b/src/backends/reference/workloads/RefChannelShuffleWorkload.hpp
index b459b87592..c70361aa26 100644
--- a/src/backends/reference/workloads/RefChannelShuffleWorkload.hpp
+++ b/src/backends/reference/workloads/RefChannelShuffleWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefChannelShuffleWorkload : public RefBaseWorkload<ChannelShuffleQueueDesc
public:
using RefBaseWorkload<ChannelShuffleQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefComparisonWorkload.cpp b/src/backends/reference/workloads/RefComparisonWorkload.cpp
index 433e3e8ad8..0ce83a99f3 100644
--- a/src/backends/reference/workloads/RefComparisonWorkload.cpp
+++ b/src/backends/reference/workloads/RefComparisonWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -47,11 +47,12 @@ void RefComparisonWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefComparisonWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefComparisonWorkload::ExecuteAsync(ExecutionData& executionData)
{
- PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ PostAllocationConfigure(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefComparisonWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefComparisonWorkload.hpp b/src/backends/reference/workloads/RefComparisonWorkload.hpp
index 93cfd1f2b1..325509ec8b 100644
--- a/src/backends/reference/workloads/RefComparisonWorkload.hpp
+++ b/src/backends/reference/workloads/RefComparisonWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -21,7 +21,7 @@ public:
RefComparisonWorkload(const ComparisonQueueDescriptor& descriptor, const WorkloadInfo& info);
void PostAllocationConfigure() override;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
diff --git a/src/backends/reference/workloads/RefConcatWorkload.cpp b/src/backends/reference/workloads/RefConcatWorkload.cpp
index c04c05354e..5aa8f037e5 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.cpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,9 +17,10 @@ void RefConcatWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConcatWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefConcatWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefConcatWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefConcatWorkload.hpp b/src/backends/reference/workloads/RefConcatWorkload.hpp
index 11d6d016ed..5175438675 100644
--- a/src/backends/reference/workloads/RefConcatWorkload.hpp
+++ b/src/backends/reference/workloads/RefConcatWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefConcatWorkload : public RefBaseWorkload<ConcatQueueDescriptor>
public:
using RefBaseWorkload<ConcatQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index 571dbb219a..937e5178bb 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,9 +25,10 @@ void RefConstantWorkload::Execute() const
Execute(m_Data.m_Outputs);
}
-void RefConstantWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefConstantWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Outputs);
}
void RefConstantWorkload::Execute(std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefConstantWorkload.hpp b/src/backends/reference/workloads/RefConstantWorkload.hpp
index 181d79d320..e2f701a33b 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.hpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ public:
RefConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
index 70e377d19b..2fe2eafb9b 100644
--- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefConvertBf16ToFp32Workload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConvertBf16ToFp32Workload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefConvertBf16ToFp32Workload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefConvertBf16ToFp32Workload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
index 8b5c6d56c2..24dcb0f682 100644
--- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
+++ b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf1
public:
using BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>::BFloat16ToFloat32Workload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
index 347132d1f6..fa811e1a32 100644
--- a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefConvertFp16ToFp32Workload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConvertFp16ToFp32Workload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefConvertFp16ToFp32Workload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefConvertFp16ToFp32Workload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.hpp b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.hpp
index feb442ef5a..b850866ce3 100644
--- a/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.hpp
+++ b/src/backends/reference/workloads/RefConvertFp16ToFp32Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefConvertFp16ToFp32Workload : public Float16ToFloat32Workload<ConvertFp16
public:
using Float16ToFloat32Workload<ConvertFp16ToFp32QueueDescriptor>::Float16ToFloat32Workload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp
index 7fe302a5ad..71ee95b2aa 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefConvertFp32ToBf16Workload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConvertFp32ToBf16Workload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefConvertFp32ToBf16Workload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefConvertFp32ToBf16Workload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp
index cd3cfa4cf3..c1e57ec37e 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp3
public:
using Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>::Float32ToBFloat16Workload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
index be13458d89..4992e9c07a 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,9 +19,10 @@ void RefConvertFp32ToFp16Workload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConvertFp32ToFp16Workload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefConvertFp32ToFp16Workload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefConvertFp32ToFp16Workload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.hpp b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.hpp
index fe137ed62f..7950c6becc 100644
--- a/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.hpp
+++ b/src/backends/reference/workloads/RefConvertFp32ToFp16Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefConvertFp32ToFp16Workload : public Float32ToFloat16Workload<ConvertFp32
public:
using Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>::Float32ToFloat16Workload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index 3ddbdcebca..355d5262df 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,9 +35,10 @@ void RefConvolution2dWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
+void RefConvolution2dWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
index f0d703786d..61c1eb6c0a 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
index f6a0ee285b..3ac7cd7286 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -37,9 +37,10 @@ void RefConvolution3dWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefConvolution3dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
+void RefConvolution3dWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefConvolution3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
index b53f3a5f33..82236b9013 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index b0e19c5851..48b519f809 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -21,9 +21,10 @@ void RefDebugWorkload<DataType>::Execute() const
}
template<armnn::DataType DataType>
-void RefDebugWorkload<DataType>::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefDebugWorkload<DataType>::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs);
}
template<armnn::DataType DataType>
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index a1579599f4..91bc322048 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -30,7 +30,7 @@ public:
using TypedWorkload<DebugQueueDescriptor, DataType>::TypedWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
void RegisterDebugCallback(const DebugCallbackFunction& func) override;
diff --git a/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
index 22e35f0ec5..cb1137847b 100644
--- a/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthToSpaceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,9 +16,10 @@ void RefDepthToSpaceWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefDepthToSpaceWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefDepthToSpaceWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefDepthToSpaceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp b/src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp
index bd179d3b9c..e19d07dace 100644
--- a/src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp
+++ b/src/backends/reference/workloads/RefDepthToSpaceWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -15,7 +15,7 @@ class RefDepthToSpaceWorkload : public RefBaseWorkload<DepthToSpaceQueueDescript
public:
using RefBaseWorkload<DepthToSpaceQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
index fd11ad1e03..c0677c9bf1 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -41,9 +41,10 @@ void RefDepthwiseConvolution2dWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefDepthwiseConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefDepthwiseConvolution2dWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefDepthwiseConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
index 30ee6d8ace..f138000433 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "RefBaseWorkload.hpp"
@@ -17,7 +17,7 @@ public:
explicit RefDepthwiseConvolution2dWorkload(const DepthwiseConvolution2dQueueDescriptor &descriptor,
const WorkloadInfo &info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.cpp b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
index f9d80073b0..aa5ff6224a 100644
--- a/src/backends/reference/workloads/RefDequantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,9 +17,10 @@ void RefDequantizeWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefDequantizeWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefDequantizeWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefDequantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.hpp b/src/backends/reference/workloads/RefDequantizeWorkload.hpp
index 8fa8951677..97cd996d7e 100644
--- a/src/backends/reference/workloads/RefDequantizeWorkload.hpp
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
using RefBaseWorkload<DequantizeQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
index 5f01db3280..ba7933b177 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,9 +23,10 @@ void RefDetectionPostProcessWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefDetectionPostProcessWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefDetectionPostProcessWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefDetectionPostProcessWorkload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
index 53b2971063..87faa31ed4 100644
--- a/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
+++ b/src/backends/reference/workloads/RefDetectionPostProcessWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
explicit RefDetectionPostProcessWorkload(const DetectionPostProcessQueueDescriptor& descriptor,
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
index 3ea51b9f69..4bd5a51a52 100644
--- a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -35,10 +35,11 @@ void RefElementwiseUnaryWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefElementwiseUnaryWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefElementwiseUnaryWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefElementwiseUnaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp
index 91229b3c58..471c6ed9a7 100644
--- a/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseUnaryWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ public:
RefElementwiseUnaryWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index d14ce075b0..344ca344e3 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,9 +33,10 @@ void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::Execute() c
template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
void RefElementwiseWorkload<Functor, ParentDescriptor, DebugString>::ExecuteAsync(
- WorkingMemDescriptor &workingMemDescriptor)
+ ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
index 579e5def34..458b524c64 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,7 +23,7 @@ class RefElementwiseWorkload : public RefBaseWorkload<ParentDescriptor>
public:
RefElementwiseWorkload(const ParentDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
using InType = typename ElementwiseBinaryFunction<Functor>::InType;
diff --git a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
index b30811b8ed..828204fe07 100644
--- a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -31,9 +31,10 @@ void RefFakeQuantizationFloat32Workload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefFakeQuantizationFloat32Workload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefFakeQuantizationFloat32Workload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefFakeQuantizationFloat32Workload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.hpp b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.hpp
index 85dc6af326..9683c87126 100644
--- a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.hpp
+++ b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefFakeQuantizationFloat32Workload : public Float32Workload<FakeQuantizati
public:
using Float32Workload<FakeQuantizationQueueDescriptor>::Float32Workload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefFillWorkload.cpp b/src/backends/reference/workloads/RefFillWorkload.cpp
index ea1ca87caf..a0f0c6b30e 100644
--- a/src/backends/reference/workloads/RefFillWorkload.cpp
+++ b/src/backends/reference/workloads/RefFillWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,9 +19,10 @@ void RefFillWorkload::Execute() const
Execute(m_Data.m_Outputs);
}
-void RefFillWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefFillWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Outputs);
}
void RefFillWorkload::Execute(std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefFillWorkload.hpp b/src/backends/reference/workloads/RefFillWorkload.hpp
index d1e00581cd..5b0dcf7ac8 100644
--- a/src/backends/reference/workloads/RefFillWorkload.hpp
+++ b/src/backends/reference/workloads/RefFillWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefFillWorkload : public RefBaseWorkload<FillQueueDescriptor>
public:
using RefBaseWorkload<FillQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefFloorWorkload.cpp b/src/backends/reference/workloads/RefFloorWorkload.cpp
index e7bd50ddea..d02e529d04 100644
--- a/src/backends/reference/workloads/RefFloorWorkload.cpp
+++ b/src/backends/reference/workloads/RefFloorWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefFloorWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefFloorWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefFloorWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefFloorWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefFloorWorkload.hpp b/src/backends/reference/workloads/RefFloorWorkload.hpp
index 6237ff0c61..5f8298d8c6 100644
--- a/src/backends/reference/workloads/RefFloorWorkload.hpp
+++ b/src/backends/reference/workloads/RefFloorWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefFloorWorkload : public RefBaseWorkload<FloorQueueDescriptor>
public:
using RefBaseWorkload<FloorQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index 087fc9da68..734d7f3503 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,9 +39,10 @@ void RefFullyConnectedWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefFullyConnectedWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefFullyConnectedWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefFullyConnectedWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
index 3bdfb861d0..7f9438cbb5 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,7 +22,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefGatherNdWorkload.cpp b/src/backends/reference/workloads/RefGatherNdWorkload.cpp
index 4c6b559943..9a9478c3dc 100644
--- a/src/backends/reference/workloads/RefGatherNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefGatherNdWorkload.cpp
@@ -18,9 +18,10 @@ void RefGatherNdWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefGatherNdWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefGatherNdWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefGatherNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefGatherNdWorkload.hpp b/src/backends/reference/workloads/RefGatherNdWorkload.hpp
index a0d91586cc..0be02bd915 100644
--- a/src/backends/reference/workloads/RefGatherNdWorkload.hpp
+++ b/src/backends/reference/workloads/RefGatherNdWorkload.hpp
@@ -15,7 +15,7 @@ class RefGatherNdWorkload : public RefBaseWorkload<GatherNdQueueDescriptor>
public:
using RefBaseWorkload<GatherNdQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefGatherWorkload.cpp b/src/backends/reference/workloads/RefGatherWorkload.cpp
index 8ad36e43b4..55a4c0961d 100644
--- a/src/backends/reference/workloads/RefGatherWorkload.cpp
+++ b/src/backends/reference/workloads/RefGatherWorkload.cpp
@@ -18,9 +18,10 @@ void RefGatherWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefGatherWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefGatherWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefGatherWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefGatherWorkload.hpp b/src/backends/reference/workloads/RefGatherWorkload.hpp
index ec880a5109..ff38a1d811 100644
--- a/src/backends/reference/workloads/RefGatherWorkload.hpp
+++ b/src/backends/reference/workloads/RefGatherWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -21,7 +21,7 @@ class RefGatherWorkload : public RefBaseWorkload<GatherQueueDescriptor>
public:
using RefBaseWorkload<GatherQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
index c103a6b9d3..dd4fbf3ccd 100644
--- a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,9 +23,10 @@ void RefInstanceNormalizationWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefInstanceNormalizationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefInstanceNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefInstanceNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.hpp b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.hpp
index a4b2dd39cb..3ae037541a 100644
--- a/src/backends/reference/workloads/RefInstanceNormalizationWorkload.hpp
+++ b/src/backends/reference/workloads/RefInstanceNormalizationWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
explicit RefInstanceNormalizationWorkload(const InstanceNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
index f6fcff3cc5..bce8f245f5 100644
--- a/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefL2NormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -29,9 +29,10 @@ void RefL2NormalizationWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefL2NormalizationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefL2NormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefL2NormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefL2NormalizationWorkload.hpp b/src/backends/reference/workloads/RefL2NormalizationWorkload.hpp
index c64e2ea0fd..4a56a04a80 100644
--- a/src/backends/reference/workloads/RefL2NormalizationWorkload.hpp
+++ b/src/backends/reference/workloads/RefL2NormalizationWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,7 +18,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index ebe1b1ecfe..a21eb459a7 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,9 +22,10 @@ void RefLogSoftmaxWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefLogSoftmaxWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefLogSoftmaxWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefLogSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp
index 91ad5f6c36..098a9ee311 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefLogSoftmaxWorkload : public RefBaseWorkload<LogSoftmaxQueueDescriptor>
public:
using RefBaseWorkload<LogSoftmaxQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
index f0cb846acf..b132061008 100644
--- a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,9 +27,10 @@ void RefLogicalBinaryWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefLogicalBinaryWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefLogicalBinaryWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefLogicalBinaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
index 797d937d80..498f80adbe 100644
--- a/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ public:
RefLogicalBinaryWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
index ec0aa0e454..a84af442ab 100644
--- a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -27,9 +27,10 @@ void RefLogicalUnaryWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefLogicalUnaryWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefLogicalUnaryWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefLogicalUnaryWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp
index ebd5826cc5..e90135952c 100644
--- a/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,7 +20,7 @@ public:
RefLogicalUnaryWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefLstmWorkload.cpp b/src/backends/reference/workloads/RefLstmWorkload.cpp
index 8609811253..3879051a5b 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -44,9 +44,10 @@ void RefLstmWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefLstmWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefLstmWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefLstmWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefLstmWorkload.hpp b/src/backends/reference/workloads/RefLstmWorkload.hpp
index 57526c9ba2..ad94e26159 100644
--- a/src/backends/reference/workloads/RefLstmWorkload.hpp
+++ b/src/backends/reference/workloads/RefLstmWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,7 +19,7 @@ public:
explicit RefLstmWorkload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefMeanWorkload.cpp b/src/backends/reference/workloads/RefMeanWorkload.cpp
index 23abaf8ff4..5d73a43a80 100644
--- a/src/backends/reference/workloads/RefMeanWorkload.cpp
+++ b/src/backends/reference/workloads/RefMeanWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,9 +23,10 @@ void RefMeanWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefMeanWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefMeanWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefMeanWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefMeanWorkload.hpp b/src/backends/reference/workloads/RefMeanWorkload.hpp
index c4c6a1261c..6c09f4bb76 100644
--- a/src/backends/reference/workloads/RefMeanWorkload.hpp
+++ b/src/backends/reference/workloads/RefMeanWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,7 +19,7 @@ class RefMeanWorkload : public RefBaseWorkload<MeanQueueDescriptor>
public:
explicit RefMeanWorkload (const MeanQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index 613868de57..40c9a6f449 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -166,9 +166,10 @@ void RefNormalizationWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefNormalizationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.hpp b/src/backends/reference/workloads/RefNormalizationWorkload.hpp
index 5218e1e43a..f06563cb36 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.hpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,7 +18,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index fd0728c8cd..9bc4efa919 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefPadWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefPadWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefPadWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefPadWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index c5871059cc..539ac4a4ad 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefPadWorkload : public RefBaseWorkload<PadQueueDescriptor>
public:
using RefBaseWorkload<PadQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.cpp b/src/backends/reference/workloads/RefPermuteWorkload.cpp
index f6af208e8a..e0e3b4fbd8 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.cpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,9 +20,10 @@ void RefPermuteWorkload<DataType>::Execute() const
}
template <armnn::DataType DataType>
-void RefPermuteWorkload<DataType>::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefPermuteWorkload<DataType>::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
template <armnn::DataType DataType>
diff --git a/src/backends/reference/workloads/RefPermuteWorkload.hpp b/src/backends/reference/workloads/RefPermuteWorkload.hpp
index d1e44520a1..c6b8e3b12d 100644
--- a/src/backends/reference/workloads/RefPermuteWorkload.hpp
+++ b/src/backends/reference/workloads/RefPermuteWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,7 +25,7 @@ public:
using TypedWorkload<PermuteQueueDescriptor, DataType>::m_Data;
using TypedWorkload<PermuteQueueDescriptor, DataType>::TypedWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefPooling2dWorkload.cpp b/src/backends/reference/workloads/RefPooling2dWorkload.cpp
index d337278fe1..9dc9a3568a 100644
--- a/src/backends/reference/workloads/RefPooling2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefPooling2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefPooling2dWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefPooling2dWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefPooling2dWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefPooling2dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefPooling2dWorkload.hpp b/src/backends/reference/workloads/RefPooling2dWorkload.hpp
index a073e3921b..8da8e87277 100644
--- a/src/backends/reference/workloads/RefPooling2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefPooling2dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,7 +19,7 @@ public:
using RefBaseWorkload<Pooling2dQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefPooling3dWorkload.cpp b/src/backends/reference/workloads/RefPooling3dWorkload.cpp
index d1e00aa5f7..5f1eda2dab 100644
--- a/src/backends/reference/workloads/RefPooling3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefPooling3dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefPooling3dWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefPooling3dWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefPooling3dWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefPooling3dWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefPooling3dWorkload.hpp b/src/backends/reference/workloads/RefPooling3dWorkload.hpp
index 92bc4766cf..6aa32ae75f 100644
--- a/src/backends/reference/workloads/RefPooling3dWorkload.hpp
+++ b/src/backends/reference/workloads/RefPooling3dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,7 +19,7 @@ public:
using RefBaseWorkload<Pooling3dQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefPreluWorkload.cpp b/src/backends/reference/workloads/RefPreluWorkload.cpp
index 94eeea1884..efe7a4c239 100644
--- a/src/backends/reference/workloads/RefPreluWorkload.cpp
+++ b/src/backends/reference/workloads/RefPreluWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,9 +23,10 @@ void RefPreluWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefPreluWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefPreluWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefPreluWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefPreluWorkload.hpp b/src/backends/reference/workloads/RefPreluWorkload.hpp
index 51ba2c15a7..b309dcf6d4 100644
--- a/src/backends/reference/workloads/RefPreluWorkload.hpp
+++ b/src/backends/reference/workloads/RefPreluWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
explicit RefPreluWorkload(const PreluQueueDescriptor& descriptor,
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.cpp b/src/backends/reference/workloads/RefQLstmWorkload.cpp
index 74f5f1ef4c..398faa9074 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,9 +48,10 @@ void RefQLstmWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefQLstmWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefQLstmWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefQLstmWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefQLstmWorkload.hpp b/src/backends/reference/workloads/RefQLstmWorkload.hpp
index 0e64a38ac9..2c56d9c30a 100644
--- a/src/backends/reference/workloads/RefQLstmWorkload.hpp
+++ b/src/backends/reference/workloads/RefQLstmWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -19,7 +19,7 @@ public:
explicit RefQLstmWorkload(const QLstmQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index 10ef0e5e15..e54ab456cd 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -39,9 +39,10 @@ void RefQuantizeWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefQuantizeWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefQuantizeWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefQuantizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.hpp b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
index e38241067d..1aba32c425 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.hpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ class RefQuantizeWorkload : public RefBaseWorkload<QuantizeQueueDescriptor>
public:
RefQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo &info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefRankWorkload.hpp b/src/backends/reference/workloads/RefRankWorkload.hpp
index 000828f9e4..48109529f0 100644
--- a/src/backends/reference/workloads/RefRankWorkload.hpp
+++ b/src/backends/reference/workloads/RefRankWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,9 +22,10 @@ public:
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override
+ void ExecuteAsync(ExecutionData& executionData) override
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
private:
diff --git a/src/backends/reference/workloads/RefReduceWorkload.cpp b/src/backends/reference/workloads/RefReduceWorkload.cpp
index 62881daaf7..e7d05cadd0 100644
--- a/src/backends/reference/workloads/RefReduceWorkload.cpp
+++ b/src/backends/reference/workloads/RefReduceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Samsung Electronics Co Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,9 +23,10 @@ void RefReduceWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefReduceWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefReduceWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefReduceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefReduceWorkload.hpp b/src/backends/reference/workloads/RefReduceWorkload.hpp
index d759bc2ef1..261193272f 100644
--- a/src/backends/reference/workloads/RefReduceWorkload.hpp
+++ b/src/backends/reference/workloads/RefReduceWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Samsung Electronics Co Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,7 +18,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefReshapeWorkload.cpp b/src/backends/reference/workloads/RefReshapeWorkload.cpp
index 960d591fec..a93645e2ea 100644
--- a/src/backends/reference/workloads/RefReshapeWorkload.cpp
+++ b/src/backends/reference/workloads/RefReshapeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,9 +17,10 @@ void RefReshapeWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefReshapeWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefReshapeWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefReshapeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefReshapeWorkload.hpp b/src/backends/reference/workloads/RefReshapeWorkload.hpp
index 7596685336..94fb3a11ab 100644
--- a/src/backends/reference/workloads/RefReshapeWorkload.hpp
+++ b/src/backends/reference/workloads/RefReshapeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefReshapeWorkload : public RefBaseWorkload<ReshapeQueueDescriptor>
public:
using RefBaseWorkload<ReshapeQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefResizeWorkload.cpp b/src/backends/reference/workloads/RefResizeWorkload.cpp
index d7a82b8f34..39a2a29878 100644
--- a/src/backends/reference/workloads/RefResizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefResizeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,9 +22,10 @@ void RefResizeWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefResizeWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefResizeWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefResizeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefResizeWorkload.hpp b/src/backends/reference/workloads/RefResizeWorkload.hpp
index f7747193ec..27ae48c918 100644
--- a/src/backends/reference/workloads/RefResizeWorkload.hpp
+++ b/src/backends/reference/workloads/RefResizeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefResizeWorkload : public RefBaseWorkload<ResizeQueueDescriptor>
public:
using RefBaseWorkload<ResizeQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefShapeWorkload.hpp b/src/backends/reference/workloads/RefShapeWorkload.hpp
index b7ed761e0c..bc4d50ac92 100644
--- a/src/backends/reference/workloads/RefShapeWorkload.hpp
+++ b/src/backends/reference/workloads/RefShapeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -21,9 +21,10 @@ public:
{
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override
+ void ExecuteAsync(ExecutionData& executionData) override
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
private:
diff --git a/src/backends/reference/workloads/RefSliceWorkload.cpp b/src/backends/reference/workloads/RefSliceWorkload.cpp
index f94a83ee2c..60c3950c32 100644
--- a/src/backends/reference/workloads/RefSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,9 +18,10 @@ void RefSliceWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefSliceWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefSliceWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefSliceWorkload.hpp b/src/backends/reference/workloads/RefSliceWorkload.hpp
index b9dca86c4e..8b99bc4bc0 100644
--- a/src/backends/reference/workloads/RefSliceWorkload.hpp
+++ b/src/backends/reference/workloads/RefSliceWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
using RefBaseWorkload<SliceQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
index 9733cbc859..f2579ce388 100644
--- a/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,9 +22,10 @@ void RefSoftmaxWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefSoftmaxWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefSoftmaxWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefSoftmaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefSoftmaxWorkload.hpp b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp
index cac102a2bb..89d2c9ee9a 100644
--- a/src/backends/reference/workloads/RefSoftmaxWorkload.hpp
+++ b/src/backends/reference/workloads/RefSoftmaxWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,7 +16,7 @@ class RefSoftmaxWorkload : public RefBaseWorkload<SoftmaxQueueDescriptor>
public:
using RefBaseWorkload<SoftmaxQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
index e35632db5b..6aa422afdc 100644
--- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
+++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,9 +17,10 @@ void RefSpaceToBatchNdWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefSpaceToBatchNdWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefSpaceToBatchNdWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefSpaceToBatchNdWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp
index eb2d93fb86..f2c87682db 100644
--- a/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp
+++ b/src/backends/reference/workloads/RefSpaceToBatchNdWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -16,7 +16,7 @@ class RefSpaceToBatchNdWorkload : public RefBaseWorkload<SpaceToBatchNdQueueDesc
public:
using RefBaseWorkload<SpaceToBatchNdQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
index 88faf7a790..e8dd052e94 100644
--- a/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
+++ b/src/backends/reference/workloads/RefSpaceToDepthWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,9 +17,10 @@ void RefSpaceToDepthWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefSpaceToDepthWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefSpaceToDepthWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefSpaceToDepthWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp b/src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp
index 17f8d2f61e..79e888d6ed 100644
--- a/src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp
+++ b/src/backends/reference/workloads/RefSpaceToDepthWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -16,7 +16,7 @@ class RefSpaceToDepthWorkload : public RefBaseWorkload<SpaceToDepthQueueDescript
public:
using RefBaseWorkload<SpaceToDepthQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefSplitterWorkload.cpp b/src/backends/reference/workloads/RefSplitterWorkload.cpp
index 076aefe517..93b393b243 100644
--- a/src/backends/reference/workloads/RefSplitterWorkload.cpp
+++ b/src/backends/reference/workloads/RefSplitterWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -16,9 +16,10 @@ void RefSplitterWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefSplitterWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefSplitterWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefSplitterWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefSplitterWorkload.hpp b/src/backends/reference/workloads/RefSplitterWorkload.hpp
index 0b72bb9fdc..0beaaf9c72 100644
--- a/src/backends/reference/workloads/RefSplitterWorkload.hpp
+++ b/src/backends/reference/workloads/RefSplitterWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -18,7 +18,7 @@ class RefSplitterWorkload : public RefBaseWorkload<SplitterQueueDescriptor>
public:
using RefBaseWorkload<SplitterQueueDescriptor>::RefBaseWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp
index f57e6e0f1e..e35c2d52c6 100644
--- a/src/backends/reference/workloads/RefStackWorkload.cpp
+++ b/src/backends/reference/workloads/RefStackWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,9 +23,10 @@ void RefStackWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefStackWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefStackWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefStackWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefStackWorkload.hpp b/src/backends/reference/workloads/RefStackWorkload.hpp
index 19f4a7be67..d413c7d91f 100644
--- a/src/backends/reference/workloads/RefStackWorkload.hpp
+++ b/src/backends/reference/workloads/RefStackWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -17,7 +17,7 @@ public:
explicit RefStackWorkload(const StackQueueDescriptor& descriptor,
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index 41fe4c3a1c..f5ca0c18d7 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,9 +20,10 @@ void RefStridedSliceWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefStridedSliceWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefStridedSliceWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefStridedSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.hpp b/src/backends/reference/workloads/RefStridedSliceWorkload.hpp
index ea443cf80d..f8a8f8d0e9 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.hpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -15,7 +15,7 @@ class RefStridedSliceWorkload : public RefBaseWorkload<StridedSliceQueueDescript
public:
RefStridedSliceWorkload(const StridedSliceQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
index 64a2d4c7b2..1269b3ff04 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -38,9 +38,10 @@ void RefTransposeConvolution2dWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefTransposeConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefTransposeConvolution2dWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefTransposeConvolution2dWorkload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
index 6bcee9a838..1d66698b93 100644
--- a/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeConvolution2dWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,7 +22,7 @@ public:
~RefTransposeConvolution2dWorkload() = default;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.cpp b/src/backends/reference/workloads/RefTransposeWorkload.cpp
index 828badd042..6c94e7d2c8 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.cpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -20,9 +20,10 @@ void RefTransposeWorkload<DataType>::Execute() const
}
template <armnn::DataType DataType>
-void RefTransposeWorkload<DataType>::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+void RefTransposeWorkload<DataType>::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
template <armnn::DataType DataType>
diff --git a/src/backends/reference/workloads/RefTransposeWorkload.hpp b/src/backends/reference/workloads/RefTransposeWorkload.hpp
index b8c3649745..db4f683699 100644
--- a/src/backends/reference/workloads/RefTransposeWorkload.hpp
+++ b/src/backends/reference/workloads/RefTransposeWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,7 +25,7 @@ public:
using TypedWorkload<TransposeQueueDescriptor, DataType>::m_Data;
using TypedWorkload<TransposeQueueDescriptor, DataType>::TypedWorkload;
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private:
void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
};
diff --git a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
index c4345d4978..23022d076c 100644
--- a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
+++ b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -48,9 +48,10 @@ void RefUnidirectionalSequenceLstmWorkload::Execute() const
Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}
-void RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
+void RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(ExecutionData& executionData)
{
- Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+ WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+ Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}
void RefUnidirectionalSequenceLstmWorkload::Execute(std::vector<ITensorHandle*> inputs,
diff --git a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.hpp b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.hpp
index 7a91cee642..ad2b862efd 100644
--- a/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.hpp
+++ b/src/backends/reference/workloads/RefUnidirectionalSequenceLstmWorkload.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -23,7 +23,7 @@ public:
const WorkloadInfo& info);
void Execute() const override;
- void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor) override;
+ void ExecuteAsync(ExecutionData& executionData) override;
private: