From 68c2c903e94a96b1663cffa3e5981a5869dc8b92 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Thu, 19 Sep 2019 11:21:11 +0100
Subject: IVGCVSW-3723 Adding reference workload support for ArgMinMax

Change-Id: I65209ecec4e3abf808163239748d6e830568c2e3
Signed-off-by: Nikhil Raj
---
 .../test/layerTests/ArgMinMaxTestImpl.cpp | 252 +++++++++++++++++++++
 .../test/layerTests/ArgMinMaxTestImpl.hpp |  29 +++
 2 files changed, 281 insertions(+)
 create mode 100644 src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp

(limited to 'src/backends/backendsCommon/test/layerTests')

diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
new file mode 100644
index 0000000000..9c07599387
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -0,0 +1,252 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArgMinMaxTestImpl.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::ArgMinMaxFunction argMinMaxFunction,
+    const armnn::TensorInfo inputTensorInfo,
+    const armnn::TensorInfo outputTensorInfo,
+    const std::vector<float>& inputData,
+    const std::vector<int32_t>& outputData,
+    int axis = 3)
+{
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
+
+    LayerTestResult<int32_t, 3> result(outputTensorInfo);
+
+    result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ArgMinMaxQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Function = argMinMaxFunction;
+    descriptor.m_Parameters.m_Axis = axis;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+    return result;
+
+}
+
+} // namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> outputValues({ 3 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Max,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 3);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMinSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> outputValues({ 1 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Min,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 3);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMinChannel4dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4};
+    const armnn::TensorShape outputShape{ 1, 2, 4 }; // C=1,2,4 H=1,3,4 W=1,3,2
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
+                                     5.0f, 6.0f, 7.0f, 8.0f,
+
+                                     10.0f, 20.0f, 30.0f, 40.0f,
+                                     50.0f, 60.0f, 70.0f, 80.0f,
+
+                                     100.0f, 200.0f, 300.0f, 400.0f,
+                                     500.0f, 600.0f, 700.0f, 800.0f });
+
+    std::vector<int32_t> outputValues({ 0, 0, 0, 0,
+                                        0, 0, 0, 0 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Min,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 1);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMaxChannel4dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4};
+    const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
+                                     5.0f, 6.0f, 7.0f, 8.0f,
+
+                                     10.0f, 20.0f, 30.0f, 40.0f,
+                                     50.0f, 60.0f, 70.0f, 80.0f,
+
+                                     100.0f, 200.0f, 300.0f, 400.0f,
+                                     500.0f, 600.0f, 700.0f, 800.0f });
+
+    std::vector<int32_t> outputValues({ 2, 2, 2, 2,
+                                        2, 2, 2, 2 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Max,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 1);
+}
+
+
+// Explicit template specializations
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
new file mode 100644
index 0000000000..79d77d41b2
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMaxSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMinSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMinChannel4dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMaxChannel4dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
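
The expected outputs in the tests above follow directly from the ArgMinMax definition: along the reduced axis (axis 3 for the simple tests, axis 1 / channels for the 4d tests), the output holds the zero-based index of the smallest or largest input value, and the reduced dimension is dropped from the output shape. The standalone sketch below (plain C++, not part of the patch; ArgMinMaxOverChannels is an illustrative name, not an Arm NN API) reproduces the all-zeros and all-twos expected values of the 1x3x2x4 channel tests, assuming the flattened input data is in NCHW order as written above.

#include <cstddef>
#include <iostream>
#include <vector>

// For every (h, w) position, return the channel index holding the extreme
// value. useMax selects argmax; otherwise argmin. Input is flattened NCHW
// with N = 1.
std::vector<int> ArgMinMaxOverChannels(const std::vector<float>& input,
                                       std::size_t channels,
                                       std::size_t height,
                                       std::size_t width,
                                       bool useMax)
{
    std::vector<int> output(height * width, 0);
    for (std::size_t h = 0; h < height; ++h)
    {
        for (std::size_t w = 0; w < width; ++w)
        {
            int bestChannel = 0;
            float bestValue = input[h * width + w]; // value in channel 0
            for (std::size_t c = 1; c < channels; ++c)
            {
                const float value = input[(c * height + h) * width + w];
                const bool better = useMax ? (value > bestValue) : (value < bestValue);
                if (better)
                {
                    bestValue   = value;
                    bestChannel = static_cast<int>(c);
                }
            }
            output[h * width + w] = bestChannel;
        }
    }
    return output;
}

int main()
{
    // Same 1x3x2x4 input as the channel tests: each channel is 10x the one
    // before it, so argmin is always channel 0 and argmax is always channel 2.
    const std::vector<float> input = {   1.0f,   2.0f,   3.0f,   4.0f,
                                         5.0f,   6.0f,   7.0f,   8.0f,

                                        10.0f,  20.0f,  30.0f,  40.0f,
                                        50.0f,  60.0f,  70.0f,  80.0f,

                                       100.0f, 200.0f, 300.0f, 400.0f,
                                       500.0f, 600.0f, 700.0f, 800.0f };

    for (int index : ArgMinMaxOverChannels(input, 3, 2, 4, false))
    {
        std::cout << index << ' ';   // prints 0 0 0 0 0 0 0 0 (cf. ArgMinChannel4dTest)
    }
    std::cout << '\n';

    for (int index : ArgMinMaxOverChannels(input, 3, 2, 4, true))
    {
        std::cout << index << ' ';   // prints 2 2 2 2 2 2 2 2 (cf. ArgMaxChannel4dTest)
    }
    std::cout << '\n';

    return 0;
}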