diff options
author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2018-10-24 15:33:28 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-25 09:49:58 +0100 |
commit | 70104000ddcf3bc1a1d21f16d1468456ca17b80a (patch) | |
tree | cc02dc10e0df5c3a4d194588feeb868130314e3a /src/backends/neon/test | |
parent | 53e9947b99e555e3a4ed28f4b0291b3a9199f29e (diff) | |
download | armnn-70104000ddcf3bc1a1d21f16d1468456ca17b80a.tar.gz |
IVGCVSW-2073: Move remaining backend-specific tests from armnn to backends
Change-Id: I45fd5b6dd32c435b78a54dc377a623e60978ce13
Diffstat (limited to 'src/backends/neon/test')
-rw-r--r-- | src/backends/neon/test/CMakeLists.txt | 3 | ||||
-rw-r--r-- | src/backends/neon/test/NeonEndToEndTests.cpp | 52 | ||||
-rw-r--r-- | src/backends/neon/test/NeonJsonPrinterTests.cpp | 22 | ||||
-rw-r--r-- | src/backends/neon/test/NeonOptimizedNetworkTests.cpp | 70 |
4 files changed, 147 insertions, 0 deletions
diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt index e6a28590b5..999bd4f339 100644 --- a/src/backends/neon/test/CMakeLists.txt +++ b/src/backends/neon/test/CMakeLists.txt @@ -5,9 +5,12 @@ list(APPEND armnnNeonBackendUnitTests_sources NeonCreateWorkloadTests.cpp + NeonEndToEndTests.cpp + NeonJsonPrinterTests.cpp NeonLayerSupportTests.cpp NeonLayerTests.cpp NeonMemCopyTests.cpp + NeonOptimizedNetworkTests.cpp NeonRuntimeTests.cpp NeonTimerTest.cpp ) diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp new file mode 100644 index 0000000000..f9aa8a5df5 --- /dev/null +++ b/src/backends/neon/test/NeonEndToEndTests.cpp @@ -0,0 +1,52 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include <backends/test/EndToEndTestImpl.hpp> + +#include <boost/test/unit_test.hpp> + +BOOST_AUTO_TEST_SUITE(NeonEndToEnd) + +BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32) +{ + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc}; + BOOST_TEST(ConstantUsageFloat32Test(backends)); +} + +BOOST_AUTO_TEST_CASE(FallbackToCpuRef) +{ + using namespace armnn; + + // Create runtime in which test will run and allow fallback to CpuRef. + IRuntime::CreationOptions options; + IRuntimePtr runtime(IRuntime::Create(options)); + + // Builds up the structure of the network. + INetworkPtr net(INetwork::Create()); + + IConnectableLayer* input = net->AddInputLayer(0); + + // This layer configuration isn't supported by CpuAcc but we allow fallback to CpuRef so it should pass.
+ NormalizationDescriptor descriptor; + IConnectableLayer* pooling = net->AddNormalizationLayer(descriptor); + + IConnectableLayer* output = net->AddOutputLayer(0); + + input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0)); + pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32)); + pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32)); + + // optimize the network + std::vector<BackendId> backends = {Compute::CpuAcc, Compute::CpuRef}; + IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec()); + + // Load it into the runtime. It should pass. + NetworkId netId; + BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/neon/test/NeonJsonPrinterTests.cpp b/src/backends/neon/test/NeonJsonPrinterTests.cpp new file mode 100644 index 0000000000..6213c145ba --- /dev/null +++ b/src/backends/neon/test/NeonJsonPrinterTests.cpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include <armnn/BackendId.hpp> + +#include <backends/test/JsonPrinterTestImpl.hpp> + +#include <boost/test/unit_test.hpp> + +#include <vector> + +BOOST_AUTO_TEST_SUITE(NeonJsonPrinter) + +BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuAccTest) +{ + std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc}; + SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJsonPrinterResult(backends); +} + +BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp new file mode 100644 index 0000000000..ae657ba770 --- /dev/null +++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp @@ -0,0 +1,70 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include <armnn/ArmNN.hpp> +#include <armnn/Graph.hpp> +#include <armnn/Network.hpp> + +#include <backends/neon/NeonWorkloadFactory.hpp> + +#include <boost/test/unit_test.hpp> + +BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork) + +BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback) +{ + // build up the structure of the network + armnn::INetworkPtr net(armnn::INetwork::Create()); + + armnn::IConnectableLayer* input = net->AddInputLayer(0); + armnn::IConnectableLayer* output = net->AddOutputLayer(0); + + input->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32)); + + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec()); + BOOST_CHECK(optNet); + // validate workloads + armnn::NeonWorkloadFactory fact; + for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph()) + { + BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc); + BOOST_CHECK_NO_THROW( + layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact)); + } +} + +BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback) +{ + // build up the structure of the network + armnn::INetworkPtr net(armnn::INetwork::Create()); + + armnn::IConnectableLayer* input = net->AddInputLayer(0); + + // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null. + armnn::NormalizationDescriptor descriptor; + armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor); + + armnn::IConnectableLayer* output = net->AddOutputLayer(0); + + input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0)); + normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32)); + normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32)); + + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec()); + BOOST_CHECK(!optNet); +} + +BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file |