path: root/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>   2018-10-24 15:33:28 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>       2018-10-25 09:49:58 +0100
commit     70104000ddcf3bc1a1d21f16d1468456ca17b80a (patch)
tree       cc02dc10e0df5c3a4d194588feeb868130314e3a /src/backends/neon/test/NeonOptimizedNetworkTests.cpp
parent     53e9947b99e555e3a4ed28f4b0291b3a9199f29e (diff)
download   armnn-70104000ddcf3bc1a1d21f16d1468456ca17b80a.tar.gz
IVGCVSW-2073: Move remaining backend-specific tests from armnn to backends
Change-Id: I45fd5b6dd32c435b78a54dc377a623e60978ce13
Diffstat (limited to 'src/backends/neon/test/NeonOptimizedNetworkTests.cpp')
-rw-r--r--  src/backends/neon/test/NeonOptimizedNetworkTests.cpp  |  70
1 file changed, 70 insertions, 0 deletions
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
new file mode 100644
index 0000000000..ae657ba770
--- /dev/null
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Graph.hpp>
+#include <armnn/Network.hpp>
+
+#include <backends/neon/NeonWorkloadFactory.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork)
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
+{
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+ BOOST_CHECK(optNet);
+ // validate workloads
+ armnn::NeonWorkloadFactory fact;
+ for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ {
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+ BOOST_CHECK_NO_THROW(
+ layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
+ }
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
+{
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+
+ // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
+ armnn::NormalizationDescriptor descriptor;
+ armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
+
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
+ normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+ normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+ BOOST_CHECK(!optNet);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
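
For context, the second test above depends on armnn::Optimize returning a null pointer when no backend in the preference list supports the normalization layer and no fallback is allowed. Below is a minimal sketch of the complementary case; it is illustrative only and not part of this commit, and it assumes the reference backend (armnn::Compute::CpuRef) is also built into the runtime so the layer can fall back and optimization succeeds. The test name is hypothetical.

// Illustrative sketch, not part of this commit. Assumes CpuRef support is compiled in.
BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
{
    // Build the same input -> normalization -> output network as in the test above.
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    armnn::NormalizationDescriptor descriptor;
    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Listing CpuRef after CpuAcc lets the layer that CpuAcc cannot handle fall back
    // to the reference backend, so Optimize is expected to return a valid network.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_CHECK(optNet);
}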