From 01097941ef85073c56cbd1d5f00d7e8ffeb9876d Mon Sep 17 00:00:00 2001
From: Finn Williams
Date: Mon, 26 Apr 2021 12:06:34 +0100
Subject: IVGCVSW-5843 Separate memory managers for WorkingMemHandles

 * Add inter layer memory management to WorkingMemHandle
 * Change Const layers to be executed once in loadedNetworkConstruction
   and share tensorHandle between all WorkingMemHandles
 * Fix various reference workloads pointing to memory in the queueDescriptor

Signed-off-by: Finn Williams
Change-Id: I69d4b3c5c84d2f5abe4540c3e624ab4f00d88226
---
 src/backends/reference/workloads/RefArgMinMaxWorkload.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'src/backends/reference/workloads/RefArgMinMaxWorkload.cpp')

diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
index 77167a866b..2d635bf6c2 100644
--- a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -41,11 +41,11 @@ void RefArgMinMaxWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vect
     const TensorInfo &outputTensorInfo = GetTensorInfo(outputs[0]);
     if (outputTensorInfo.GetDataType() == armnn::DataType::Signed32)
     {
-        int32_t *output = GetOutputTensorData<int32_t>(0, m_Data);
+        int32_t *output = GetOutputTensorData<int32_t>(outputs[0]);
         ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
                   m_Data.m_Parameters.m_Axis);
     }
     else
     {
-        int64_t *output = GetOutputTensorData<int64_t>(0, m_Data);
+        int64_t *output = GetOutputTensorData<int64_t>(outputs[0]);
         ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
                   m_Data.m_Parameters.m_Axis);
     }
--
cgit v1.2.1