From d1f57736b0dd5168e39725f37188239f8ca024d8 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat
Date: Thu, 31 Oct 2019 14:24:02 +0000
Subject: IVGCVSW-3698 Add EndToEnd Layer test for ArgMinMax

* Add EndToEnd test implementation for ArgMinMax
* Add EndToEnd tests for Ref
* Fix output data type of ArgMinMax in WorkloadFactory

Signed-off-by: Narumol Prangnawarat
Change-Id: I6d07d25bb96ab21422584284046222257ddee43c
---
 src/backends/backendsCommon/WorkloadFactory.cpp  |   2 +-
 .../test/ArgMinMaxEndToEndTestImpl.hpp           | 308 +++++++++++++++++++++
 src/backends/backendsCommon/test/CMakeLists.txt  |   1 +
 src/backends/reference/test/RefEndToEndTests.cpp | 106 +++++++
 4 files changed, 416 insertions(+), 1 deletion(-)
 create mode 100644 src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp

diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 31ad5cb45a..b4b4ffca30 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -110,7 +110,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsArgMinMaxSupported(
                     OverrideDataType(input, dataType),
-                    OverrideDataType(output, dataType),
+                    OverrideDataType(output, DataType::Signed32),
                     descriptor,
                     reason);
             break;
diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..3bb1dd6537
--- /dev/null
+++ b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
@@ -0,0 +1,308 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <armnn/INetwork.hpp>
+#include <ResolveType.hpp>
+
+#include <QuantizeHelper.hpp>
+
+namespace
+{
+
+armnn::INetworkPtr CreateArgMinMaxNetwork(const armnn::TensorInfo& inputTensorInfo,
+                                          const armnn::TensorInfo& outputTensorInfo,
+                                          armnn::ArgMinMaxFunction function,
+                                          int axis)
+{
+    armnn::INetworkPtr network(armnn::INetwork::Create());
+
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Function = function;
+    descriptor.m_Axis = axis;
+
+    armnn::IConnectableLayer* inputLayer     = network->AddInputLayer(0, "Input");
+    armnn::IConnectableLayer* argMinMaxLayer = network->AddArgMinMaxLayer(descriptor, "ArgMinMax");
+    armnn::IConnectableLayer* outputLayer    = network->AddOutputLayer(0, "Output");
+
+    Connect(inputLayer, argMinMaxLayer, inputTensorInfo, 0, 0);
+    Connect(argMinMaxLayer, outputLayer, outputTensorInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinMaxEndToEndImpl(const armnn::TensorShape& inputShape,
+                           const armnn::TensorShape& outputShape,
+                           const std::vector<float>& inputData,
+                           const std::vector<int32_t>& expectedOutputData,
+                           armnn::ArgMinMaxFunction function,
+                           int axis,
+                           const std::vector<armnn::BackendId>& backends)
+{
+    const float   qScale  = armnn::IsQuantizedType<T>() ? 2.0f : 1.0f;
+    const int32_t qOffset = armnn::IsQuantizedType<T>() ? 2    : 0;
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    // quantize data
+    std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+
+    armnn::INetworkPtr network = CreateArgMinMaxNetwork(inputTensorInfo,
+                                                        outputTensorInfo,
+                                                        function,
+                                                        axis);
+
+    EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Signed32>(std::move(network),
+                                                                { { 0, qInputData } },
+                                                                { { 0, expectedOutputData } },
+                                                                backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxEndToEndSimple(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    std::vector<float> inputData({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> expectedOutputData({ 3 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     -1,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinEndToEndSimple(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    std::vector<float> inputData({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> expectedOutputData({ 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     3,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis0EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 3, 2, 1, 4 };
+    const armnn::TensorShape outputShape{ 2, 1, 4 };
+
+    std::vector<float> inputData({ 1.0f,   2.0f,   3.0f,   4.0f,
+                                   8.0f,   7.0f,   6.0f,   5.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 1, 2, 1, 2,
+                                              1, 1, 1, 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     0,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis0EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 3, 2, 1, 4 };
+    const armnn::TensorShape outputShape{ 2, 1, 4 };
+
+    std::vector<float> inputData({ 1.0f,   2.0f,   3.0f,   4.0f,
+                                   8.0f,   7.0f,   6.0f,   5.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+                                              0, 0, 0, 0 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     0,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis1EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+    std::vector<float> inputData({ 1.0f,   2.0f,   3.0f,   4.0f,
+                                   8.0f,   7.0f,   6.0f,   5.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 1, 2, 1, 2,
+                                              1, 1, 1, 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     1,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis1EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+    std::vector<float> inputData({ 1.0f,   2.0f,   3.0f,   4.0f,
+                                   8.0f,   7.0f,   6.0f,   5.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+                                              0, 0, 0, 0 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     1,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis2EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 4 };
+
+    std::vector<float> inputData({ 1.0f,   2.0f,   3.0f,   4.0f,
+                                   8.0f,   7.0f,   6.0f,   5.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 1, 1, 1, 1,
+                                              1, 1, 1, 1,
+                                              1, 0, 1, 0 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     2,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis2EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 4 };
+
+    std::vector<float> inputData({ 1.0f,   2.0f,   3.0f,   4.0f,
+                                   8.0f,   7.0f,   6.0f,   5.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+                                              0, 0, 0, 0,
+                                              0, 1, 0, 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     2,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis3EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 2 };
+
+    std::vector<float> inputData({ 1.0f,   3.0f,   5.0f,   7.0f,
+                                   8.0f,   7.0f,   6.0f,   5.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 3, 0,
+                                              2, 0,
+                                              3, 3 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     3,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis3EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 2 };
+
+    std::vector<float> inputData({ 1.0f,   3.0f,   5.0f,   7.0f,
+                                   18.0f,  16.0f,  14.0f,  12.0f,
+                                   100.0f, 20.0f,  300.0f, 40.0f,
+                                   500.0f, 475.0f, 450.0f, 425.0f,
+                                   10.0f,  200.0f, 30.0f,  400.0f,
+                                   50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 3,
+                                              1, 3,
+                                              0, 0 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     3,
+                                     backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 9c86cdf3c1..8f93e084f8 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -6,6 +6,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     AbsEndToEndTestImpl.hpp
     ActivationFixture.hpp
+    ArgMinMaxEndToEndTestImpl.hpp
     BackendIdTests.cpp
     BackendRegistryTests.cpp
     CommonTestUtils.cpp
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 1968e4da7e..4d8c82d900 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -6,6 +6,7 @@
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
 
 #include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
 #include <backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
@@ -1041,6 +1042,111 @@ BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest2)
     InstanceNormalizationNchwEndToEndTest2(defaultBackends);
 }
 
+// ArgMinMax
+BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest)
+{
+    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
+{
+    ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
+{
+    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
+{
+    ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
+{
+    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
+{
+    ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
+{
+    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
+{
+    ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
+{
+    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
+{
+    ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
+{
+    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
+{
+    ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
+{
+    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
+{
+    ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
+{
+    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
+{
+    ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
+{
+    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
+{
+    ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
+{
+    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
+{
+    ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
 BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)
-- 
cgit v1.2.1
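
For reference only (not part of the patch above): a minimal standalone C++ sketch of the reduction the new Simple test cases exercise. It computes ArgMax over the innermost axis of the test input { 5, 2, 8, 10, 9 } with the standard library and checks that the resulting index is 3, matching expectedOutputData in ArgMaxEndToEndSimple. The main() wrapper and variable names are illustrative and not armnn API.

// Illustrative sketch, not armnn code: ArgMax over the innermost axis,
// using the input values and expected index from ArgMaxEndToEndSimple above.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <vector>

int main()
{
    const std::vector<float> input = { 5.0f, 2.0f, 8.0f, 10.0f, 9.0f };

    // Index of the largest element; the ArgMinMax layer returns indices as
    // Signed32 values, which is why the output TensorInfo uses DataType::Signed32.
    const auto maxIt = std::max_element(input.begin(), input.end());
    const int32_t argMax = static_cast<int32_t>(std::distance(input.begin(), maxIt));

    assert(argMax == 3); // matches expectedOutputData({ 3 }) in the test
    return 0;
}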