From b9546cf1ffde83f63436c4087711dcf098ea4196 Mon Sep 17 00:00:00 2001
From: narpra01
Date: Tue, 20 Nov 2018 15:21:28 +0000
Subject: IVGCVSW-2173 - Add end to end layer test implementation and example usage

* Add CommonTestUtils
* Add end to end layer test implementation
* Add example usage for Merger layer on Ref, Cl, Neon

Change-Id: I8931136288cd68b80bcdad8f5ae087ae1a70a60a
---
 src/backends/backendsCommon/test/CMakeLists.txt  |   2 +
 .../backendsCommon/test/CommonTestUtils.hpp      |  22 ++
 .../backendsCommon/test/EndToEndTestImpl.hpp     |  50 ++++
 .../backendsCommon/test/MergerTestImpl.hpp       | 302 +++++++++++++++++++++
 src/backends/cl/test/ClEndToEndTests.cpp         |  36 ++-
 src/backends/neon/test/NeonEndToEndTests.cpp     |  36 ++-
 src/backends/reference/test/RefEndToEndTests.cpp |  58 +++-
 7 files changed, 492 insertions(+), 14 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/CommonTestUtils.hpp
 create mode 100644 src/backends/backendsCommon/test/MergerTestImpl.hpp

diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 7419c148db..962c6a52c4 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -9,6 +9,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     BackendIdTests.cpp
     BackendRegistryTests.cpp
     BatchNormTestImpl.hpp
+    CommonTestUtils.hpp
     Conv2dTestImpl.hpp
     ConvertFp16ToFp32TestImpl.hpp
     ConvertFp32ToFp16TestImpl.hpp
@@ -21,6 +22,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     LayerTests.hpp
     LstmTestImpl.hpp
     NormTestImpl.hpp
+    MergerTestImpl.hpp
     OptimizedNetworkTests.cpp
     PermuteTestImpl.hpp
     Pooling2dTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp
new file mode 100644
index 0000000000..68180fb289
--- /dev/null
+++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/INetwork.hpp>
+
+using namespace armnn;
+
+namespace
+{
+
+// Connects two layers.
+void Connect(IConnectableLayer* from, IConnectableLayer* to, const TensorInfo& tensorInfo,
+             unsigned int fromIndex = 0, unsigned int toIndex = 0)
+{
+    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
+    from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
+}
+
+}
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index e16116ee10..15a3937aca 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -5,9 +5,12 @@
 #pragma once
 
 #include <armnn/ArmNN.hpp>
+#include <armnn/INetwork.hpp>
 
 #include <backendsCommon/test/QuantizeHelper.hpp>
 
+#include <boost/test/unit_test.hpp>
+
 #include <vector>
 
 namespace
@@ -99,4 +102,51 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
     );
 }
 
+template<typename T>
+void EndToEndLayerTestImpl(INetworkPtr network,
+                           const std::map<int, std::vector<T>>& inputTensorData,
+                           const std::map<int, std::vector<T>>& expectedOutputData,
+                           std::vector<BackendId> backends)
+{
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // optimize the network
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    NetworkId netId;
+    runtime->LoadNetwork(netId, std::move(optNet));
+
+    InputTensors inputTensors;
+    inputTensors.reserve(inputTensorData.size());
+    for (auto&& it : inputTensorData)
+    {
+        inputTensors.push_back({it.first,
+                                ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
+    }
+    OutputTensors outputTensors;
+    outputTensors.reserve(expectedOutputData.size());
+    std::map<int, std::vector<T>> outputStorage;
+    for (auto&& it : expectedOutputData)
+    {
+        std::vector<T> out(it.second.size());
+        outputStorage.emplace(it.first, out);
+        outputTensors.push_back({it.first,
+                                 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
+                                        outputStorage.at(it.first).data())});
+    }
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results.
+    for (auto&& it : expectedOutputData)
+    {
+        std::vector<T> out = outputStorage.at(it.first);
+        BOOST_TEST(it.second == out);
+    }
+}
+
 } // anonymous namespace
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp
new file mode 100644
index 0000000000..e0b8233336
--- /dev/null
+++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp
@@ -0,0 +1,302 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/CommonTestUtils.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <vector>
+
+namespace
+{
+
+template<armnn::DataType DataType>
+INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
+                                const TensorShape& outputShape,
+                                unsigned int concatAxis,
+                                const float qScale = 1.0f,
+                                const int32_t qOffset = 0)
+{
+    using namespace armnn;
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    OriginsDescriptor descriptor;
+
+    descriptor = CreateMergerDescriptorForConcatenation(inputShapes.begin(),
+                                                        inputShapes.end(),
+                                                        concatAxis);
+    IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger");
+
+    for (unsigned int i = 0; i < inputShapes.size(); ++i)
+    {
+        TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
+        IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
+        Connect(input, merger, inputTensorInfo, 0, i);
+    }
+
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+    Connect(merger, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
+template<typename T>
+void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 0;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 4, 3, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<typename T>
+void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 1;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 6, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<typename T>
+void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 2;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 3, 4, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        3, 4,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        9, 10,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
+}
+
+template<typename T>
+void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 3;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 3, 2, 4 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12,
+        1, 2,
+        3, 4,
+        5, 6,
+        7, 8,
+        9, 10,
+        11, 12
+    };
+
+    std::vector<T> expectedOutput{
+        1, 2,
+        1, 2,
+        3, 4,
+        3, 4,
+        5, 6,
+        5, 6,
+        7, 8,
+        7, 8,
+        9, 10,
+        9, 10,
+        11, 12,
+        11, 12,
+        1, 2,
+        1, 2,
+        3, 4,
+        3, 4,
+        5, 6,
+        5, 6,
+        7, 8,
+        7, 8,
+        9, 10,
+        9, 10,
+        11, 12,
+        11, 12
+    };
+
+    std::map<int, std::vector<T>> inputTensorData = {{ 0,inputData }, { 1,inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0,expectedOutput }};
+
+    EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index b374079e78..bf299dc0b5 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -4,15 +4,47 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
 
 BOOST_AUTO_TEST_SUITE(ClEndToEnd)
 
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::GpuAcc};
+
 BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-    ConstantUsageFloat32Test(backends);
+    ConstantUsageFloat32Test(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 3b7e30925e..3ca415a1d1 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -4,15 +4,17 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
 
 BOOST_AUTO_TEST_SUITE(NeonEndToEnd)
 
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};
+
 BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-    BOOST_TEST(ConstantUsageFloat32Test(backends));
+    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
 }
 
 BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
@@ -49,4 +51,34 @@ BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
     BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index dc2ffb81ff..97bec51645 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -4,21 +4,22 @@
 //
 
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
 
 #include <boost/test/unit_test.hpp>
 
 BOOST_AUTO_TEST_SUITE(RefEndToEnd)
 
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};
+
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    BOOST_TEST(ConstantUsageFloat32Test(backends));
+    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
 }
 
 BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
 {
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    BOOST_TEST(ConstantUsageUint8Test(backends));
+    BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
 }
 
 BOOST_AUTO_TEST_CASE(Unsigned8)
@@ -51,8 +52,7 @@ BOOST_AUTO_TEST_CASE(Unsigned8)
     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // optimize the network
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -115,8 +115,7 @@ BOOST_AUTO_TEST_CASE(TrivialAdd)
     add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -214,8 +213,7 @@ BOOST_AUTO_TEST_CASE(MultipleOutputs)
     activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     // optimize the network
-    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -248,4 +246,44 @@ BOOST_AUTO_TEST_CASE(MultipleOutputs)
     BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
 }
 
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
+{
+    MergerDim2EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
+{
+    MergerDim2EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
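
Example usage beyond Merger (editor's sketch, not taken from this commit): EndToEndLayerTestImpl is layer agnostic, so any small single-layer graph built with the INetwork API and wired up with Connect from CommonTestUtils.hpp can be exercised the same way. The test name, the choice of a ReLU activation layer and the literal data below are hypothetical; only Connect, EndToEndLayerTestImpl and the armnn network-building calls come from the code added above, and the file is assumed to include backendsCommon/test/EndToEndTestImpl.hpp.

// Hypothetical sketch: run a ReLU activation end to end on CpuRef using the
// helpers introduced by this patch.
BOOST_AUTO_TEST_CASE(RefActivationEndToEndExample)
{
    using namespace armnn;

    const TensorShape shape{ 1, 1, 2, 2 };
    const TensorInfo info(shape, DataType::Float32);

    // Build input -> ReLU -> output.
    INetworkPtr net(INetwork::Create());

    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    IConnectableLayer* input      = net->AddInputLayer(0);
    IConnectableLayer* activation = net->AddActivationLayer(descriptor, "relu");
    IConnectableLayer* output     = net->AddOutputLayer(0, "output");

    Connect(input, activation, info, 0, 0);
    Connect(activation, output, info, 0, 0);

    // Keyed by layer binding id, as EndToEndLayerTestImpl expects; ReLU clamps
    // the negative values to zero.
    std::map<int, std::vector<float>> inputTensorData    = {{ 0, { -1.0f, 2.0f, -3.0f, 4.0f } }};
    std::map<int, std::vector<float>> expectedOutputData = {{ 0, {  0.0f, 2.0f,  0.0f, 4.0f } }};

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    EndToEndLayerTestImpl<float>(std::move(net), inputTensorData, expectedOutputData, backends);
}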