aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNarumol Prangnawarat <narumol.prangnawarat@arm.com>2019-10-31 14:24:02 +0000
committerMatteo Martincigh <matteo.martincigh@arm.com>2019-11-01 09:01:49 +0000
commitd1f57736b0dd5168e39725f37188239f8ca024d8 (patch)
tree98160fa19ba9599cf0315133800a1e931a82e386
parent3f4d7104cdd5fd0ca6238afd4d22ac8734fd84ae (diff)
downloadarmnn-d1f57736b0dd5168e39725f37188239f8ca024d8.tar.gz
IVGCVSW-3698 Add EndToEnd Layer test for ArgMinMax
* Add EndToEnd test implementation for ArgMinMax
* Add EndToEnd tests for Ref
* Fix output data type of ArgMinMax in WorkloadFactory

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I6d07d25bb96ab21422584284046222257ddee43c
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp2
-rw-r--r--src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp308
-rw-r--r--src/backends/backendsCommon/test/CMakeLists.txt1
-rw-r--r--src/backends/reference/test/RefEndToEndTests.cpp106
4 files changed, 416 insertions(+), 1 deletion(-)
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 31ad5cb45a..b4b4ffca30 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -110,7 +110,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
result = layerSupportObject->IsArgMinMaxSupported(
OverrideDataType(input, dataType),
- OverrideDataType(output, dataType),
+ OverrideDataType(output, DataType::Signed32),
descriptor,
reason);
break;
diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..3bb1dd6537
--- /dev/null
+++ b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
@@ -0,0 +1,308 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <QuantizeHelper.hpp>
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+namespace
+{
+
+// Builds the minimal test graph: Input -> ArgMinMax -> Output.
+// The ArgMinMax layer is configured with the requested function (Min or Max)
+// and reduction axis; the given tensor infos are set on the two connections.
+armnn::INetworkPtr CreateArgMinMaxNetwork(const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ armnn::ArgMinMaxFunction function,
+ int axis)
+{
+ armnn::INetworkPtr network(armnn::INetwork::Create());
+
+ armnn::ArgMinMaxDescriptor descriptor;
+ descriptor.m_Function = function;
+ descriptor.m_Axis = axis;
+
+ armnn::IConnectableLayer* inputLayer = network->AddInputLayer(0, "Input");
+ armnn::IConnectableLayer* argMinMaxLayer = network->AddArgMinMaxLayer(descriptor, "ArgMinMax");
+ armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+ // Connect() (from CommonTestUtils.hpp) wires slot 0 to slot 0 and applies the tensor info.
+ Connect(inputLayer, argMinMaxLayer, inputTensorInfo, 0, 0);
+ Connect(argMinMaxLayer, outputLayer, outputTensorInfo, 0, 0);
+
+ return network;
+}
+
+// Shared driver for all ArgMinMax end-to-end tests: quantizes the float input
+// (when ArmnnType is a quantized type), builds the network, and runs it through
+// EndToEndLayerTestImpl comparing against the expected int32 index tensor.
+// Note the output tensor is always DataType::Signed32, regardless of ArmnnType,
+// because ArgMinMax produces element indices.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinMaxEndToEndImpl(const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& outputShape,
+ const std::vector<float>& inputData,
+ const std::vector<int32_t>& expectedOutputData,
+ armnn::ArgMinMaxFunction function,
+ int axis,
+ const std::vector<armnn::BackendId>& backends)
+{
+ // Non-trivial scale/offset only for quantized types; floats pass through unchanged.
+ const float qScale = armnn::IsQuantizedType<T>() ? 2.0f : 1.0f;
+ const int32_t qOffset = armnn::IsQuantizedType<T>() ? 2 : 0;
+
+ armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+ // Quantize the reference float data into the working type T.
+ std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+
+ armnn::INetworkPtr network = CreateArgMinMaxNetwork(inputTensorInfo,
+ outputTensorInfo,
+ function,
+ axis);
+
+ EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Signed32>(std::move(network),
+ { { 0, qInputData } },
+ { { 0, expectedOutputData } },
+ backends);
+}
+
+// ArgMax over the last axis (axis = -1) of a [1,1,1,5] tensor.
+// The largest value (10.0f) sits at index 3, hence the expected output { 3 }.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxEndToEndSimple(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+ const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+ std::vector<float> inputData({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+ std::vector<int32_t> expectedOutputData({ 3 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Max,
+ -1,
+ backends);
+}
+
+// ArgMin over axis 3 (the last axis, given explicitly rather than as -1) of a
+// [1,1,1,5] tensor. The smallest value (2.0f) sits at index 1, hence { 1 }.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinEndToEndSimple(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+ const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+ std::vector<float> inputData({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+ std::vector<int32_t> expectedOutputData({ 1 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Min,
+ 3,
+ backends);
+}
+
+// ArgMax across axis 0 (the batch dimension) of a [3,2,1,4] tensor, producing
+// a [2,1,4] tensor of batch indices holding the largest value at each position.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis0EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 3, 2, 1, 4 };
+ const armnn::TensorShape outputShape{ 2, 1, 4 };
+
+ // Three batches of 2x1x4 values.
+ std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 8.0f, 7.0f, 6.0f, 5.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f });
+
+ std::vector<int32_t> expectedOutputData({ 1, 2, 1, 2,
+ 1, 1, 1, 1 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Max,
+ 0,
+ backends);
+}
+
+// ArgMin across axis 0 (the batch dimension) of a [3,2,1,4] tensor. Batch 0
+// holds the smallest value at every position, so all expected indices are 0.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis0EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 3, 2, 1, 4 };
+ const armnn::TensorShape outputShape{ 2, 1, 4 };
+
+ // Same data layout as ArgMaxAxis0EndToEnd: three batches of 2x1x4 values.
+ std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 8.0f, 7.0f, 6.0f, 5.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f });
+
+ std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+ 0, 0, 0, 0 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Min,
+ 0,
+ backends);
+}
+
+// ArgMax across axis 1 (channels) of a [1,3,2,4] tensor, producing a [1,2,4]
+// tensor of channel indices holding the largest value at each position.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis1EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+ // Three channels of 2x4 values.
+ std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 8.0f, 7.0f, 6.0f, 5.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f });
+
+ std::vector<int32_t> expectedOutputData({ 1, 2, 1, 2,
+ 1, 1, 1, 1 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Max,
+ 1,
+ backends);
+}
+
+// ArgMin across axis 1 (channels) of a [1,3,2,4] tensor. Channel 0 holds the
+// smallest value at every position, so all expected indices are 0.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis1EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+ // Same data layout as ArgMaxAxis1EndToEnd: three channels of 2x4 values.
+ std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 8.0f, 7.0f, 6.0f, 5.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f });
+
+ std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+ 0, 0, 0, 0 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Min,
+ 1,
+ backends);
+}
+
+// ArgMax across axis 2 (height, of extent 2) of a [1,3,2,4] tensor, producing
+// a [1,3,4] tensor whose entries are 0 or 1 — the row holding the larger value.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis2EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 3, 4 };
+
+ // Note: the last two rows are swapped relative to the axis-0/axis-1 tests
+ // so that the third channel produces a mixed { 1, 0, 1, 0 } result.
+ std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 8.0f, 7.0f, 6.0f, 5.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f });
+
+ std::vector<int32_t> expectedOutputData({ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 1, 0, 1, 0});
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Max,
+ 2,
+ backends);
+}
+
+// ArgMin across axis 2 (height, of extent 2) of a [1,3,2,4] tensor — the
+// mirror of ArgMaxAxis2EndToEnd, so each expected index is the complement.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis2EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 3, 4 };
+
+ // Same data as ArgMaxAxis2EndToEnd.
+ std::vector<float> inputData({ 1.0f, 2.0f, 3.0f, 4.0f,
+ 8.0f, 7.0f, 6.0f, 5.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f });
+
+ std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 1, 0, 1 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Min,
+ 2,
+ backends);
+}
+
+// ArgMax across axis 3 (width, of extent 4) of a [1,3,2,4] tensor, producing a
+// [1,3,2] tensor: for each row, the index (0..3) of its largest element.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis3EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 3, 2 };
+
+ // First row is strictly increasing so its max is at index 3; the second is
+ // strictly decreasing so its max is at index 0, and so on per row.
+ std::vector<float> inputData({ 1.0f, 3.0f, 5.0f, 7.0f,
+ 8.0f, 7.0f, 6.0f, 5.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f });
+
+ std::vector<int32_t> expectedOutputData({ 3, 0,
+ 2, 0,
+ 3, 3});
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Max,
+ 3,
+ backends);
+}
+
+// ArgMin across axis 3 (width, of extent 4) of a [1,3,2,4] tensor, producing a
+// [1,3,2] tensor: for each row, the index (0..3) of its smallest element.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis3EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+ const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+ const armnn::TensorShape outputShape{ 1, 3, 2 };
+
+ // Rows are ordered (increasing/decreasing/mixed) so the expected minima land
+ // on distinct indices per row.
+ std::vector<float> inputData({ 1.0f, 3.0f, 5.0f, 7.0f,
+ 18.0f, 16.0f, 14.0f, 12.0f,
+ 100.0f, 20.0f, 300.0f, 40.0f,
+ 500.0f, 475.0f, 450.0f, 425.0f,
+ 10.0f, 200.0f, 30.0f, 400.0f,
+ 50.0f, 60.0f, 70.0f, 80.0f });
+
+ std::vector<int32_t> expectedOutputData({ 0, 3,
+ 1, 3,
+ 0, 0 });
+
+ ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+ outputShape,
+ inputData,
+ expectedOutputData,
+ armnn::ArgMinMaxFunction::Min,
+ 3,
+ backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 9c86cdf3c1..8f93e084f8 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -6,6 +6,7 @@
list(APPEND armnnBackendsCommonUnitTests_sources
AbsEndToEndTestImpl.hpp
ActivationFixture.hpp
+ ArgMinMaxEndToEndTestImpl.hpp
BackendIdTests.cpp
BackendRegistryTests.cpp
CommonTestUtils.cpp
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 1968e4da7e..4d8c82d900 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -6,6 +6,7 @@
#include <backendsCommon/test/EndToEndTestImpl.hpp>
#include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
#include <backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
@@ -1041,6 +1042,111 @@ BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest2)
InstanceNormalizationNchwEndToEndTest2(defaultBackends);
}
+// ArgMinMax end-to-end tests for the reference backend: each case exercises one
+// helper from ArgMinMaxEndToEndTestImpl.hpp in Float32 and QuantisedAsymm8.
+// Fix: removed the stray blank line after the opening brace in the four
+// ArgMin*Uint8 cases so all test bodies share the same one-line shape.
+BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest)
+{
+ ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
+{
+ ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
+{
+ ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
+{
+ ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
+{
+ ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
+{
+ ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
+{
+ ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
+{
+ ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
+{
+ ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
+{
+ ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
+{
+ ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
+{
+ ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
+{
+ ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
+{
+ ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
+{
+ ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
+{
+ ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
+{
+ ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
+{
+ ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
+{
+ ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
+{
+ ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+
#if !defined(__ANDROID__)
// Only run these tests on non Android platforms
BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)