author    Nikhil Raj <nikhil.raj@arm.com>  2019-09-19 11:21:11 +0100
committer Nikhil Raj <nikhil.raj@arm.com>  2019-09-19 11:21:11 +0100
commit    68c2c903e94a96b1663cffa3e5981a5869dc8b92 (patch)
tree      cdfd3df18383ac4743f8175f1e7caf03779caadd /src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
parent    95cf225f58a985fd96ef23fb77bd685f609dd18e (diff)
download  armnn-68c2c903e94a96b1663cffa3e5981a5869dc8b92.tar.gz
IVGCVSW-3723 Adding reference workload support for ArgMinMax

Change-Id: I65209ecec4e3abf808163239748d6e830568c2e3
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp')
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp  252
1 file changed, 252 insertions(+), 0 deletions(-)
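For context: ArgMinMax returns, for each position along the axes that are kept, the index of the minimum or maximum element along the reduced axis, which is why every output below is a Signed32 index tensor of one rank less than its input. A minimal sketch of that semantics for the 1-D case, in plain standalone C++ rather than the Arm NN API:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Returns the index of the largest element; flip the comparison for argmin.
    int32_t ArgMax1d(const std::vector<float>& values)
    {
        int32_t best = 0;
        for (std::size_t i = 1; i < values.size(); ++i)
        {
            if (values[i] > values[static_cast<std::size_t>(best)])
            {
                best = static_cast<int32_t>(i);
            }
        }
        return best;
    }

    // ArgMax1d({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f }) == 3, matching ArgMaxSimpleTest below.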
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
new file mode 100644
index 0000000000..9c07599387
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -0,0 +1,252 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArgMinMaxTestImpl.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
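+// Shared harness for the ArgMin/ArgMax tests below: converts the float input
+// to the requested data type, builds an ArgMinMax workload through the given
+// backend factory, executes it, and returns the computed index tensor along
+// with the expected one for the caller to compare.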
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::ArgMinMaxFunction argMinMaxFunction,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const std::vector<float>& inputData,
+ const std::vector<int32_t>& outputData,
+ int axis = 3)
+{
+ auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
+
+ LayerTestResult<int32_t, 3> result(outputTensorInfo);
+
+ result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
+
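+ // Create backend-specific tensor handles for the input and output buffers.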
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
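+ // Describe the workload: which reduction to apply (Min or Max) and the axis
+ // to reduce along.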
+ armnn::ArgMinMaxQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_Function = argMinMaxFunction;
+ descriptor.m_Parameters.m_Axis = axis;
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
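+ // Ask the factory for the backend's ArgMinMax workload implementation.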
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
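+ // Finalise anything that depends on the allocated buffers, then run.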
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+ return result;
+}
+
+} // namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+ const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+ if(armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ inputTensorInfo.SetQuantizationOffset(0);
+ }
+
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
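+ // The largest of the five values, 10.0f, sits at index 3 along the reduced axis.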
+ std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+ std::vector<int32_t> outputValues({ 3 });
+
+ return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+ armnn::ArgMinMaxFunction::Max,
+ inputTensorInfo, outputTensorInfo,
+ inputValues, outputValues, 3);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMinSimpleTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+ const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+ if(armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ inputTensorInfo.SetQuantizationOffset(0);
+ }
+
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
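+ // The smallest of the five values, 2.0f, sits at index 1 along the reduced axis.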
+ std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+ std::vector<int32_t> outputValues({ 1 });
+
+ return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+ armnn::ArgMinMaxFunction::Min,
+ inputTensorInfo, outputTensorInfo,
+ inputValues, outputValues, 3);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMinChannel4dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 2, 4 }; // Reducing over C (axis 1) -> { 1, 2, 4 }; over H -> { 1, 3, 4 }; over W -> { 1, 3, 2 }.
+
+ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+ if(armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ inputTensorInfo.SetQuantizationOffset(0);
+ }
+
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
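+ // Channel 0 holds the smallest value at every (H, W) position, so the
+ // expected index is 0 everywhere when reducing over the channel axis.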
+ std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
+
+ 100.0f, 200.0f, 300.0f, 400.0f,
+ 500.0f, 600.0f, 700.0f, 800.0f });
+
+ std::vector<int32_t> outputValues({ 0, 0, 0, 0,
+ 0, 0, 0, 0 });
+
+ return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+ armnn::ArgMinMaxFunction::Min,
+ inputTensorInfo, outputTensorInfo,
+ inputValues, outputValues, 1);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMaxChannel4dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+ if(armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(1.0f);
+ inputTensorInfo.SetQuantizationOffset(0);
+ }
+
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
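+ // Channel 2 holds the largest value at every (H, W) position, so the
+ // expected index is 2 everywhere when reducing over the channel axis.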
+ std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 5.0f, 6.0f, 7.0f, 8.0f,
+
+ 10.0f, 20.0f, 30.0f, 40.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
+
+ 100.0f, 200.0f, 300.0f, 400.0f,
+ 500.0f, 600.0f, 700.0f, 800.0f });
+
+ std::vector<int32_t> outputValues({ 2, 2, 2, 2,
+ 2, 2, 2, 2 });
+
+ return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+ armnn::ArgMinMaxFunction::Max,
+ inputTensorInfo, outputTensorInfo,
+ inputValues, outputValues, 1);
+}
+
+// Explicit template instantiations
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::QuantisedAsymm8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::QuantisedSymm16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
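
Downstream, a backend's test suite is expected to register these helpers as named test cases. A rough sketch of what that could look like for the reference backend, assuming Arm NN's existing ARMNN_AUTO_TEST_CASE layer-test macro (the test case names here are illustrative, not part of this change):

    // Hypothetical registrations, e.g. in RefLayerTests.cpp:
    ARMNN_AUTO_TEST_CASE(ArgMaxSimpleFloat32, ArgMaxSimpleTest<armnn::DataType::Float32>)
    ARMNN_AUTO_TEST_CASE(ArgMinSimpleFloat32, ArgMinSimpleTest<armnn::DataType::Float32>)
    ARMNN_AUTO_TEST_CASE(ArgMinChannelFloat32, ArgMinChannel4dTest<armnn::DataType::Float32>)
    ARMNN_AUTO_TEST_CASE(ArgMaxChannelFloat32, ArgMaxChannel4dTest<armnn::DataType::Float32>)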