diff options
author | Finn Williams <Finn.Williams@arm.com> | 2021-04-07 10:23:21 +0100 |
---|---|---|
committer | Finn Williams <Finn.Williams@arm.com> | 2021-04-14 15:18:38 +0100 |
commit | b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0 (patch) | |
tree | 04cc91a6efb7e2601f80e4213a747938165b7184 /src/backends/reference/workloads/RefArgMinMaxWorkload.cpp | |
parent | b898222a8856475f0217be5e78b4816aa1914f15 (diff) | |
download | armnn-b8181f72b8c7c9132373dbcf7f8709ec2c0f23c0.tar.gz |
IVGCVSW-5787 Add/Update Execute() implementations in RefActivationWorkload
* Added multithreaded StridedSliceEndToEndTest
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I4579db7b5959e0a22256f1bda00238c22e611dec
Diffstat (limited to 'src/backends/reference/workloads/RefArgMinMaxWorkload.cpp')
-rw-r--r-- | src/backends/reference/workloads/RefArgMinMaxWorkload.cpp | 17 |
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index bf8649f54d..77167a866b 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -18,16 +18,27 @@ RefArgMinMaxWorkload::RefArgMinMaxWorkload(
         const WorkloadInfo& info)
         : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info) {}
 
+
 void RefArgMinMaxWorkload::Execute() const
 {
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+
+void RefArgMinMaxWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
+{
+    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
+}
+
+void RefArgMinMaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+{
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefArgMinMaxWorkload_Execute");
 
-    const TensorInfo &inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo &inputTensorInfo = GetTensorInfo(inputs[0]);
 
-    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, inputs[0]->Map());
     Decoder<float> &decoder = *decoderPtr;
 
-    const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+    const TensorInfo &outputTensorInfo = GetTensorInfo(outputs[0]);
 
     if (outputTensorInfo.GetDataType() == armnn::DataType::Signed32)
     {
         int32_t *output = GetOutputTensorData<int32_t>(0, m_Data);